from math import ceil


def assert_device_map(device_map, num_blocks):
    # Validate that every attention block is assigned to exactly one device.
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    # Evenly assign consecutive layer indices to the given devices.
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
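
# A minimal usage sketch of the two helpers above (the device ids and layer
# count are illustrative, not tied to any particular model):
#
#   device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
#   # -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
#   assert_device_map(device_map, num_blocks=12)  # raises ValueError on a bad map
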
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
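
# To run just this file under the usual transformers test layout (the path below
# is an assumption; RUN_SLOW enables the @slow integration test):
#   RUN_SLOW=1 python -m pytest tests/models/resnet/test_modeling_tf_resnet.py -q
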
import csv

import tweepy

# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str):
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)

        # save most recent tweets
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
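
# A small follow-up sketch: reading the CSV the script produces back into memory
# (the filename assumes the screen_name used above).
#
#   import csv
#   with open("new_FirePing32_tweets.csv") as f:
#       rows = list(csv.DictReader(f))
#   print(len(rows), rows[0]["text"] if rows else "no tweets")
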
import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
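
# Tiny illustration of the split above (depth 6, three chunks of two rows each;
# the tensor values are made up). The fused weight is stored in K, V, Q order,
# which is why the unpacking order differs from the q/k/v key names:
#
#   w = torch.arange(12).reshape(6, 2)
#   k, v, q = torch.split(w, 6 // 3, dim=0)  # each chunk has shape (2, 2)
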
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
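
# Example invocation (the script name and paths are illustrative assumptions):
#   python convert_opt_checkpoint.py \
#       --fairseq_path ./opt-125m/model.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf \
#       --hf_config facebook/opt-125m
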
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
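
# make_batched normalizes the accepted input layouts into "list of videos, each
# a list of frames", e.g. (shapes illustrative):
#   single image               -> [[image]]
#   list of frames (one video) -> [[frame_0, frame_1, ...]]
#   list of videos             -> returned unchanged
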

class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
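
# A minimal usage sketch (frame count and sizes are illustrative): feed a short
# random video through the processor defined above.
#
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]
#   features = processor(video, return_tensors="np")
#   features["pixel_values"].shape  # -> (1, 8, 3, 224, 224)
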
# Number of symbols in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
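
# Worked example of the rolling-hash update above, sliding a length-2 window from
# "ab" to "bc" in the text "abc" (the modulus is omitted for readability):
#
#   hash("ab") = ord("a") * 256 + ord("b")
#   hash("bc") = (hash("ab") - ord("a") * 256) * 256 + ord("c")
#              = ord("b") * 256 + ord("c")
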
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)

    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)

    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowerCamelCase = {'input_ids': [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowerCamelCase,  # the encoding dict defined above
            model_name="Helsinki-NLP/opus-mt-en-de",
            revision="1a8c2263da11e68e50938f97e10cd57820bd504c",
            decode_kwargs={"use_source_tokenizer": True},
        )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(src_ids, expected_src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(target_ids, expected_target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)


from ..utils import DummyObject, requires_backends

class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
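
# These stubs let the package import cleanly when flax is not installed, while
# turning any use of a flax-backed object into a helpful error. A small sketch:
#
#   FlaxDDIMScheduler()  # without flax installed -> ImportError via requires_backends
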
"""simple docstring"""
from __future__ import annotations
import math
def _lowercase ( __snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> int:
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 ,node_index * 2 ,__snake_case ,__snake_case ,__snake_case ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__snake_case ,__snake_case ,__snake_case ) ,)
return min(
minimax(depth + 1 ,node_index * 2 ,__snake_case ,__snake_case ,__snake_case ) ,minimax(depth + 1 ,node_index * 2 + 1 ,__snake_case ,__snake_case ,__snake_case ) ,)
def _lowercase ( ) -> None:
__lowerCAmelCase : int = [90, 23, 6, 33, 21, 65, 123, 34_423]
__lowerCAmelCase : Optional[int] = math.log(len(__snake_case ) ,2 )
print("Optimal value : " ,end="" )
print(minimax(0 ,0 ,__snake_case ,__snake_case ,__snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 293 |
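
# Worked trace for the sample scores above (maximizer moves first):
#   depth 2 (max of leaf pairs): 90, 33, 65, 34423
#   depth 1 (min):               33, 65
#   depth 0 (max):               65
# so the program prints "Optimal value : 65".
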
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__snake_case : List[str] = get_logger(__name__)
__snake_case : str = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class A__ :
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE)
def __call__( self: Tuple , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: jnp.ndarray) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
class A__ :
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE)
def __call__( self: List[str] , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: jnp.ndarray) -> jnp.ndarray:
"""simple docstring"""
raise NotImplementedError(
F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""")
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(_SCREAMING_SNAKE_CASE)
def __call__( self: Optional[int] , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: int , **_SCREAMING_SNAKE_CASE: str) -> jnp.ndarray:
"""simple docstring"""
for processor in self:
__lowerCAmelCase : int = inspect.signature(processor.__call__).parameters
if len(_SCREAMING_SNAKE_CASE) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
F"""Make sure that all the required parameters: {list(function_args.keys())} for """
F"""{processor.__class__} are passed to the logits processor.""")
__lowerCAmelCase : int = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE)
else:
__lowerCAmelCase : Union[str, Any] = processor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature: float):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")

        self.temperature = temperature

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores


class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")

        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])

        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p

        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)

        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)

        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]

        return next_scores


class FlaxTopKLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")

        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)

        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift

        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)

        return scores


class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))

        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)

        return scores


class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")

        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")

        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores


class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))

        return scores
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: List[str] , _SCREAMING_SNAKE_CASE: int) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = dict(_SCREAMING_SNAKE_CASE)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__lowerCAmelCase : Any = jnp.ones((max(force_token_map.keys()) + 1) , dtype=jnp.intaa) * -1
for index, token in force_token_map.items():
if token is not None:
__lowerCAmelCase : int = force_token_array.at[index].set(_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = jnp.intaa(_SCREAMING_SNAKE_CASE)
def __call__( self: List[str] , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: jnp.ndarray , _SCREAMING_SNAKE_CASE: int) -> jnp.ndarray:
"""simple docstring"""
def _force_token(_SCREAMING_SNAKE_CASE: Optional[Any]):
__lowerCAmelCase : int = scores.shape[0]
__lowerCAmelCase : Optional[int] = self.force_token_array[generation_idx]
__lowerCAmelCase : int = jnp.ones_like(_SCREAMING_SNAKE_CASE , dtype=scores.dtype) * -float("inf")
__lowerCAmelCase : Optional[int] = jnp.zeros((batch_size, 1) , dtype=scores.dtype)
__lowerCAmelCase : Dict = lax.dynamic_update_slice(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (0, current_token))
return new_scores
__lowerCAmelCase : Union[str, Any] = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_SCREAMING_SNAKE_CASE) , lambda: scores , ) , )
return scores
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: Optional[Any] , _SCREAMING_SNAKE_CASE: Optional[Any] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]) -> int:
"""simple docstring"""
__lowerCAmelCase : List[str] = generate_config.eos_token_id
__lowerCAmelCase : Optional[int] = generate_config.no_timestamps_token_id
__lowerCAmelCase : Dict = generate_config.no_timestamps_token_id + 1
__lowerCAmelCase : Optional[Any] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_SCREAMING_SNAKE_CASE , "max_initial_timestamp_index"):
__lowerCAmelCase : Optional[int] = generate_config.max_initial_timestamp_index
else:
__lowerCAmelCase : Optional[int] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__lowerCAmelCase : int = model_config.vocab_size
    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores | 293 | 1 |
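# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (assuming only `jax` is installed) of the
# forcing trick used by `_force_token` above: fill a score row with -inf and
# splice a column of zeros at the forced token's index, so that token becomes
# the unique argmax after (log-)softmax. The sizes are made-up demo values.
# ---------------------------------------------------------------------------
import jax.numpy as jnp
from jax import lax

batch_size, vocab_size, forced_token = 2, 8, 5
demo_scores = jnp.zeros((batch_size, vocab_size))
demo_new_scores = jnp.ones_like(demo_scores) * -float("inf")
demo_updates = jnp.zeros((batch_size, 1), dtype=demo_scores.dtype)
demo_forced = lax.dynamic_update_slice(demo_new_scores, demo_updates, (0, forced_token))
assert (demo_forced.argmax(axis=-1) == forced_token).all()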
"""
Project Euler Problem 116: https://projecteuler.net/problem=116

A row of grey square tiles is replaced by coloured oblong tiles: red (length
two), green (length three) or blue (length four), without mixing colours.
Count the number of possible replacements.
"""


def solution(length: int = 50) -> int:
    """
    Returns the number of ways the row can be filled with red, green or blue
    tiles, keeping each colour scheme separate.
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'''{solution() = }''')
| 465 |
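# ---------------------------------------------------------------------------
# Hedged cross-check for the DP above. The helper below is hypothetical (not
# part of the original file): counting fillings with at least one tile of a
# single length m follows a Fibonacci-style recurrence, and Project Euler
# 116's worked example gives 7 + 3 + 2 = 12 ways for a row of length 5.
# ---------------------------------------------------------------------------
def _ways_with_tile(n: int, m: int) -> int:
    total = [1] * m + [0] * (n - m + 1)  # total[k]: fillings of length k, incl. the empty one
    for k in range(m, n + 1):
        total[k] = total[k - 1] + total[k - m]
    return total[n] - 1  # drop the all-empty filling


assert sum(_ways_with_tile(5, m) for m in (2, 3, 4)) == 12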
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention_forwardGenerator_pass = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_, self.block_out_channels_ * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 465 | 1 |
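# ---------------------------------------------------------------------------
# Hedged note on the tests above: both the fast and the slow test build the
# inpainting mask as a float array the size of the image and zero out one
# rectangle (the top-left 32x32 quadrant, resp. a band for the hat). Which
# value means "repaint" has flipped between diffusers releases, so the snippet
# below only illustrates the array construction, not the mask semantics.
# ---------------------------------------------------------------------------
import numpy as np

demo_mask = np.ones((64, 64), dtype=np.float32)
demo_mask[:32, :32] = 0
assert demo_mask.sum() == 64 * 64 - 32 * 32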
def simplify(current_set: list[list]) -> list[list]:
    """
    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by magnitude of first term --> creating 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """
    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 254 |
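# ---------------------------------------------------------------------------
# Hedged sanity check for the solver above, against a hand-solved 2x2 system:
# x + 2y = 3 and 4x + 5y = 6 give x = -1, y = 2.
# ---------------------------------------------------------------------------
assert solve_simultaneous([[1, 2, 3], [4, 5, 6]]) == [-1.0, 2.0]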
| 254 | 1 |
"""
Utilities for parsing raw characters from the keyboard.
"""
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8  # flag to check if a key is an arrow key

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 9 |
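# ---------------------------------------------------------------------------
# Small sketch of the encoding trick above: arrow keys arrive as the VT100
# sequence ESC [ A..D, so the module stores 65..68 ("A".."D") offset by
# ARROW_KEY_FLAG = 1 << 8 to keep them out of the 0..255 byte range; decoding
# simply subtracts the flag again.
# ---------------------------------------------------------------------------
_FLAG = 1 << 8
assert chr((65 + _FLAG) - _FLAG) == "A"  # "up" round-trips back to ESC [ A's final byte
assert (68 + _FLAG) > 255  # flagged codes can never collide with raw bytes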
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8],
            batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID],
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                eager_mode=True,
                multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 9 | 1 |
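# ---------------------------------------------------------------------------
# Minimal hedged sketch of running one of the benchmarks above outside of
# unittest; assumes transformers and tensorflow are installed and reuses the
# same arguments as the graph-mode inference test.
# ---------------------------------------------------------------------------
def _run_tiny_gpt2_benchmark():
    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

    args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    return TensorFlowBenchmark(args).run()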
""" Testing suite for the PyTorch BEiT model. """
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 342 |
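# ---------------------------------------------------------------------------
# Standalone check of the ViT-style arithmetic the tester above relies on:
# a 30x30 image with 2x2 patches yields (30 // 2) ** 2 = 225 patches, plus one
# [CLS] token, giving the sequence length 226 used in the shape assertions.
# ---------------------------------------------------------------------------
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
assert (num_patches, num_patches + 1) == (225, 226)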
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 342 | 1 |
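# ---------------------------------------------------------------------------
# Why the intermediate latents above have shape (1, 4, 64, 256): Stable
# Diffusion's VAE downsamples each spatial side by 8, so a 512x2048 panorama
# maps to a 64x256 latent; the panorama pipeline then denoises overlapping
# 64x64 views across that wide latent (the MultiDiffusion scheme).
# ---------------------------------------------------------------------------
height, width, vae_scale_factor = 512, 2048, 8
assert (height // vae_scale_factor, width // vae_scale_factor) == (64, 256)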
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> List[Any]:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(a - b ) for a, b in zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
def __UpperCAmelCase ( __lowerCamelCase ) -> Any:
if point:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
for item in point:
if not isinstance(UpperCAmelCase__ , (int, float) ):
lowercase__ : Any = (
'''Expected a list of numbers as input, found '''
f"""{type(UpperCAmelCase__ ).__name__}"""
)
raise TypeError(UpperCAmelCase__ )
else:
lowercase__ : List[Any] = f"""Expected a list of numbers as input, found {type(UpperCAmelCase__ ).__name__}"""
raise TypeError(UpperCAmelCase__ )
else:
raise ValueError('''Missing an input''' )
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
_validate_point(UpperCAmelCase__ )
_validate_point(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError('''Both points must be in the same n-dimensional space''' )
return float(sum(abs(x - y ) for x, y in zip(UpperCAmelCase__ , UpperCAmelCase__ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
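# ---------------------------------------------------------------------------
# Hedged usage sketch for the helpers above (values checked by hand):
# |1 - 2| + |1 - 2| = 2, and |1 - 6| + |5 - 4| + |5 - 10| = 11.
# ---------------------------------------------------------------------------
assert manhattan_distance([1, 1], [2, 2]) == 2.0
assert manhattan_distance_one_liner([1, 5, 5], [6, 4, 10]) == 11.0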
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> float:
return base * power(__lowerCamelCase , (exponent - 1) ) if exponent else 1
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
lowerCAmelCase_ = int(input('Enter the base: ').strip())
lowerCAmelCase_ = int(input('Enter the exponent: ').strip())
lowerCAmelCase_ = power(base, abs(exponent))
if exponent < 0: # power() does not properly deal w/ negative exponents
lowerCAmelCase_ = 1 / result
print(F'''{base} to the power of {exponent} is {result}''')
| 122 | 0 |
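# ---------------------------------------------------------------------------
# Quick checks for power() above; the function itself only supports
# non-negative exponents, which is why the driver takes abs(exponent) and
# inverts the result afterwards.
# ---------------------------------------------------------------------------
assert power(2, 3) == 8
assert power(5, 0) == 1
assert 1 / power(2, abs(-2)) == 0.25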
"""BigBird model configuration"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 660 |
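# ---------------------------------------------------------------------------
# Minimal hedged usage sketch for the config above (assumes transformers is
# installed and BigBirdConfig is importable from the module just defined):
# the sparse-attention knobs round-trip through standard serialization.
# ---------------------------------------------------------------------------
def _bigbird_config_roundtrip() -> bool:
    cfg = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
    return BigBirdConfig.from_dict(cfg.to_dict()).block_size == 64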
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
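# Added reference sketch (not in the original file): the `entropy` helper imported
# above is DeeBERT's early-exit confidence measure — a highway ramp exits once the
# entropy of its logits drops below a threshold. A minimal version, assuming it
# mirrors `modeling_highway_bert.entropy`:
def _entropy_reference(x):
    import torch

    # Shannon entropy of softmax(x), row-wise: H = log(sum e^x) - sum(x * e^x) / sum(e^x)
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A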
| 565 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 710 |
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
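# Added usage sketch (not part of the test file): the smallest end-to-end call the
# tests above exercise. `apply_ocr=False` avoids the Tesseract dependency; the
# 224x224 output size is the processor default.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import LayoutLMv3ImageProcessor

    processor = LayoutLMv3ImageProcessor(apply_ocr=False)
    image = Image.fromarray(np.zeros((40, 40, 3), dtype=np.uint8))
    encoding = processor(image, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])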
| 682 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
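# Added standalone sketch (not in the original tests): the integration test above
# is plain greedy decoding from a three-token prompt. An equivalent snippet,
# assuming the public `openai-gpt` checkpoint and its tokenizer:
if __name__ == "__main__":
    from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

    tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
    model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
    inputs = tokenizer("the president is", return_tensors="pt")
    output_ids = model.generate(inputs.input_ids, do_sample=False)  # greedy decoding
    print(tokenizer.decode(output_ids[0]))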
| 447 |
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
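# Added worked example (not part of the original file): 585 is palindromic in
# both bases, since bin(585) == "0b1001001001":
#
#     >>> is_palindrome(585)
#     True
#     >>> is_palindrome(bin(585).split("b")[1])
#     True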
| 447 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )

            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
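# Added reference sketch (not part of the original file): the dynamically imported
# `label_smoothed_nll_loss` follows the fairseq recipe — blend the NLL of the gold
# token with the mean NLL over the whole vocabulary. A minimal version under that
# assumption (the real helper lives in the examples' `utils.py`); it returns the
# same (loss, nll_loss) pair `_compute_loss` unpacks above:
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (..., vocab) log-probabilities; target: (...) gold token ids.
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)  # NLL of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # summed NLL over the vocab
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss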
| 709 |
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    """Sort a list by scattering values into unit-width buckets, sorting each, and concatenating."""
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
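# Added illustration (not part of the original file): the bucket index is
# int(value - min_value), i.e. unit-width buckets. For the second assertion
# above, [0, 1, -10, 15, 2, -2] spans 26 buckets, of which only six are used.
if __name__ == "__main__":
    values = [0, 1, -10, 15, 2, -2]
    indices = [int(v - min(values)) for v in values]
    assert indices == [10, 11, 0, 25, 12, 8]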
| 517 | 0 |
# Maximum number of distinct characters (size of the rolling-hash alphabet)
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
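# Added illustration (not part of the original file): the rolling update removes
# the leading character's contribution and appends the next character in O(1),
# using modulus_power == alphabet_size**(p_len - 1) % modulus.
if __name__ == "__main__":
    def _poly_hash(s: str) -> int:
        h = 0
        for ch in s:
            h = (h * alphabet_size + ord(ch)) % modulus
        return h

    rolled = (
        (_poly_hash("abc") - ord("a") * pow(alphabet_size, 2, modulus)) * alphabet_size + ord("d")
    ) % modulus
    assert rolled == _poly_hash("bcd")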
| 692 |
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
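# Added usage note (not part of the original file): constructing the deprecated
# class still works but emits the FutureWarning above. Sketch only — this module
# uses relative imports, so run it from inside the installed package.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        DPTFeatureExtractor()
        assert any("DPTFeatureExtractor is deprecated" in str(w.message) for w in caught)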
| 692 | 1 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image,
            size=(size_dict["height"], size_dict["width"]),
            resample=resample,
            data_format=data_format,
            **kwargs,
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
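# Added usage sketch (not part of the original file): the default pipeline is
# resize (shortest edge -> int(256 / 224 * 224) == 256) -> center-crop 224x224 ->
# rescale to [0, 1] -> ImageNet normalization. Sketch only — this module uses
# relative imports, so run it from inside the installed package.
if __name__ == "__main__":
    import numpy as np

    processor = LevitImageProcessor()
    image = np.random.randint(0, 256, (300, 500, 3), dtype=np.uint8)
    outputs = processor(image, return_tensors="np")
    print(outputs["pixel_values"].shape)  # (1, 3, 224, 224)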
| 53 |
from __future__ import annotations


def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of `nums` (0 for an empty list)."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
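# Added worked example (not part of the original file): trace of the recurrence
# on [5, 1, 1, 5] — (max_including, max_excluding) starts at (5, 0) and evolves
# (1, 5) -> (6, 5) -> (10, 6), so the result is max(10, 6) == 10 (take 5 + 5).
#
#     >>> maximum_non_adjacent_sum([5, 1, 1, 5])
#     10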
| 53 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
        "junnyu/roformer_chinese_char_small": (
            "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_chinese_char_base": (
            "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_discriminator": (
            "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
        ),
        "junnyu/roformer_small_generator": (
            "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "junnyu/roformer_chinese_small": 1536,
    "junnyu/roformer_chinese_base": 1536,
    "junnyu/roformer_chinese_char_small": 512,
    "junnyu/roformer_chinese_char_base": 512,
    "junnyu/roformer_small_discriminator": 128,
    "junnyu/roformer_small_generator": 128,
}

PRETRAINED_INIT_CONFIGURATION = {
    "junnyu/roformer_chinese_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_base": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_small": {"do_lower_case": True},
    "junnyu/roformer_chinese_char_base": {"do_lower_case": True},
    "junnyu/roformer_small_discriminator": {"do_lower_case": True},
    "junnyu/roformer_small_generator": {"do_lower_case": True},
}


class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
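# Added usage sketch (not part of the original file): round-trip with one of the
# checkpoints from the maps above. Sketch only — this module uses relative
# imports, and the custom pre-tokenizer needs the `rjieba` package installed.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    ids = tokenizer("今天天气非常好。")["input_ids"]
    print(tokenizer.decode(ids, skip_special_tokens=True))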
| 181 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 181 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)

        # test with a raw waveform
        audio = np.zeros((34000,))
        audio2 = np.zeros((14000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"

        audio_classifier = pipeline("audio-classification", model=model)

        audio = np.ones((8000,))
        output = audio_classifier(audio, top_k=4)

        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

        audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"

        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")

        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
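# Added usage sketch (not part of the test file): the smallest real invocation the
# tests above exercise — classify a raw waveform with the tiny random checkpoint.
if __name__ == "__main__":
    classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    print(classifier(np.ones((8000,), dtype=np.float32), top_k=2))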
| 627 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__a = """sshleifer/student_marian_en_ro_6_1"""
__a = """sshleifer/tiny-mbart"""
@require_torch
class __lowercase ( __snake_case ):
def _lowercase ( self : Dict , __lowerCamelCase : List[Any]=False , __lowerCamelCase : str=None , __lowerCamelCase : Any=True , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=True , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = self.run_trainer(
eval_steps=1 , max_len=1_2 , model_name=__lowerCamelCase , num_train_epochs=1 , distributed=__lowerCamelCase , extra_args_str=__lowerCamelCase , predict_with_generate=__lowerCamelCase , do_train=__lowerCamelCase , do_eval=__lowerCamelCase , do_predict=__lowerCamelCase , )
UpperCAmelCase = TrainerState.load_from_json(os.path.join(__lowerCamelCase , """trainer_state.json""" ) ).log_history
if not do_eval:
return
UpperCAmelCase = [log for log in logs if """eval_loss""" in log.keys()]
UpperCAmelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , __lowerCamelCase )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _lowercase ( self : Dict ) -> str:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@require_torch_multi_gpu
def _lowercase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Dict ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__lowerCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=__lowerCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__lowerCamelCase )
@require_apex
@require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test a 2nd time - the first run was producing `eval_loss: nan`;
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
@slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding` weights that
        # don't get quantized and remain in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
            --save_steps {str(eval_steps)}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
        args_eval = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
            --eval_steps {str(eval_steps)}
""".split()
        args_predict = """
--do_predict
""".split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
main()
return output_dir
| 627 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
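
# Hedged note (added): "platform" trades speed for predictable memory. If only the preallocation is
# the problem, JAX also supports other settings per the same GPU memory allocation docs, e.g.:
#   os.environ["XLA_PYTHON_CLIENT_PREALLOCATE"] = "false"
#   os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.5"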
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
@slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
        tgt_text = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 148 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
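
# Hedged usage sketch (added; the checkpoint id matches the map above, the rest is illustrative):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   input_ids = tokenizer("Hello world").input_ids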
| 148 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode

    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('''SD: Done: ONNX''')
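
    # Hedged usage example (added; the script name and paths are hypothetical):
    #   python convert_vae_decoder_to_onnx.py --model_path ./stable-diffusion-v1-5 \
    #       --output_path ./sd_onnx --opset 14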
| 711 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # a minimal denoising loop: the model predicts the residual at each timestep and
        # scheduler.step() integrates it into the next sample
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that switching between compatible scheduler classes via from_config
        # preserves the results for the default config
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 32 | 0 |
'''simple docstring'''
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1]."""
    # factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
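
# Worked example (added for clarity): kth_permutation(10, 4) uses factorials [1, 2, 6];
# since 10 = 1*6 + 2*2 + 0*1, it picks indices 1, 2, 0 from the shrinking pool [0, 1, 2, 3],
# producing the permutation [1, 3, 0, 2].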
if __name__ == "__main__":
import doctest
doctest.testmod()
| 400 |
'''simple docstring'''
def solution():
    """Return how many Sundays fell on the first of the month during the
    twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century (1 Jan 1901 was a Tuesday)
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
| 400 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word

                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into a list of tokens using BPE."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
| 714 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate intersection and union between a predicted and a ground-truth segmentation map."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Aggregate intersection and union over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index: int,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute overall accuracy, per-category accuracy and per-category IoU."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: int,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
| 675 | 0 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_CITATION = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
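
# For reference (added; a hedged summary of the parameterized score nltk implements):
#   F_mean = P * R / (alpha * P + (1 - alpha) * R)
#   penalty = gamma * (chunks / matches) ** beta
#   meteor  = F_mean * (1 - penalty)
# with the defaults alpha=0.9, beta=3, gamma=0.5 used below.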
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 649 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 649 | 1 |
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into a dict mapping each node to a list of
    [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
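
# Hedged example of the expected input format (one undirected edge per line:
# "node_a node_b distance"):
#   a b 20
#   a c 18
#   b c 10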
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour construction of an initial tour, starting from
    the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of a solution; each neighbour carries its total distance as its last element."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
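    # Usage sketch (illustrative): the input file is assumed to be a whitespace-separated
    # edge list, one "<node_a> <node_b> <distance>" triple per line, with the start node
    # taken from the first character of the file, e.g.:
    #   a b 20
    #   a c 18
    #   b c 10
    # Then run, for example:
    #   python tabu_search.py -f edges.txt -i 100 -s 5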
| 318 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def _lowerCAmelCase ( lowercase : Any ) ->Any:
"""simple docstring"""
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
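# Usage sketch: with the custom checker installed above, an individual doctest can opt
# out of output comparison via the registered IGNORE_RESULT flag, e.g.
#
#     >>> some_function_with_unstable_output()  # doctest: +IGNORE_RESULT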
| 318 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)


class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name,
        config,
        version,
        cache_dir=None,
        use_local_dummy_data=False,
        load_existing_dummy_data=True,
        download_callbacks=None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
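# Usage sketch (illustrative; this manager is normally instantiated by the datasets test
# utilities, and all argument values below are hypothetical):
#
#     dl_manager = MockDownloadManager("squad", None, "1.0.0", use_local_dummy_data=True)
#     local_path = dl_manager.download_and_extract("https://example.com/data.json")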
| 84 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 391 | 0 |
'''simple docstring'''
import operator as op
_UpperCamelCase : Optional[Any] = "scaler.pt"
_UpperCamelCase : Union[str, Any] = "pytorch_model"
_UpperCamelCase : str = "random_states"
_UpperCamelCase : Dict = "optimizer"
_UpperCamelCase : str = "scheduler"
_UpperCamelCase : Tuple = "pytorch_model.bin"
_UpperCamelCase : int = "pytorch_model.bin.index.json"
_UpperCamelCase : List[Any] = "model.safetensors"
_UpperCamelCase : Dict = "model.safetensors.index.json"
_UpperCamelCase : List[Any] = "1.10.2"
_UpperCamelCase : Any = "py38"
_UpperCamelCase : str = "4.17.0"
_UpperCamelCase : int = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_UpperCamelCase : List[Any] = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_UpperCamelCase : Dict = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_UpperCamelCase : Dict = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_UpperCamelCase : List[Any] = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_UpperCamelCase : Tuple = "2.0.1"
_UpperCamelCase : List[Any] = ["pdsh", "standard", "openmpi", "mvapich"]
_UpperCamelCase : Any = ["default", "reduce-overhead", "max-autotune"]
_UpperCamelCase : Optional[Any] = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_UpperCamelCase : Union[str, Any] = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
_UpperCamelCase : Any = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]  # descriptive name assumed; the original constant name is not recoverable here
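# Usage sketch: STR_OPERATION_TO_FUNC maps comparison strings to operator functions,
# which is handy for version gating, e.g.:
#
#     from packaging import version
#     compare = STR_OPERATION_TO_FUNC[">="]
#     is_new_enough = compare(version.parse("2.1.0"), version.parse(FSDP_PYTORCH_VERSION))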
| 514 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text=None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text, overwrite=False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
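# Usage sketch (the model name is illustrative):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Hi, can you recommend a movie?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])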
| 514 | 1 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 184 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
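# Usage sketch (mirrors the FAISS tests above):
#
#     import faiss
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, indices = index.search(np.ones(5, dtype=np.float32))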
| 184 | 1 |
TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
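# Usage sketch (illustrative): pipeline test mixins typically point their `params` /
# `batch_params` class attributes at these frozensets, e.g.
#
#     class MyTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
#         params = TEXT_TO_IMAGE_PARAMS
#         batch_params = TEXT_TO_IMAGE_BATCH_PARAMS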
| 670 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
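    # Usage sketch: an undirected Erdos-Renyi-style graph over 5 vertices where each
    # possible edge is kept with probability 0.5.
    #
    #     random.seed(1)
    #     print(random_graph(5, 0.5))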
| 670 | 1 |
'''simple docstring'''
class MaxFenwickTree:
    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, current_left_border, index)
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
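    # Usage sketch (illustrative):
    #
    #     tree = MaxFenwickTree(8)
    #     tree.update(2, 10)
    #     tree.update(5, 7)
    #     print(tree.query(0, 8))  # range maximum over [0, 8)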
| 50 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 525 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
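# Usage sketch: instantiating a default configuration and reading one field.
#
#     configuration = VanConfig()
#     print(configuration.hidden_sizes)  # [64, 128, 320, 512]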
| 713 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class EMAModel:
    """
    Exponential Moving Average of model weights.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
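# Usage sketch (illustrative): tracking an EMA of a small model's weights during training.
#
#     net = torch.nn.Linear(4, 2)
#     ema = EMAModel(net.parameters(), decay=0.999)
#     for _ in range(10):
#         ...  # optimizer step updating net's parameters
#         ema.step(net.parameters())
#     ema.copy_to(net.parameters())  # load the averaged weights for evaluation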
| 326 | 0 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
'''simple docstring'''
def __init__( self , a_ , a_=1_3 , a_=7 , a_=True , a_=True , a_=True , a_=True , a_=9_9 , a_=1_6 , a_=3_6 , a_=6 , a_=6 , a_=6 , a_=3_7 , a_="gelu" , a_=0.1 , a_=0.1 , a_=5_1_2 , a_=1_6 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ) -> str:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = embedding_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_hidden_groups
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
    def prepare_config_and_inputs( self ):
"""simple docstring"""
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Any:
"""simple docstring"""
UpperCAmelCase = AlbertModel(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ , attention_mask=a_ , token_type_ids=a_ )
UpperCAmelCase = model(a_ , token_type_ids=a_ )
UpperCAmelCase = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Dict:
"""simple docstring"""
UpperCAmelCase = AlbertForPreTraining(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , sentence_order_label=a_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = AlbertForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Any:
"""simple docstring"""
UpperCAmelCase = AlbertForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> str:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = AlbertForSequenceClassification(a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = AlbertForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.num_choices
UpperCAmelCase = AlbertForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        """simple docstring"""
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['sentence_order_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        """simple docstring"""
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a_ )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a_ )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*a_ )
@slow
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_no_head_absolute_embedding( self ):
        """simple docstring"""
        model = AlbertModel.from_pretrained('albert-base-v2' )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
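# The hard-coded `expected_slice` above is a regression anchor: the sampled hidden states
# only need to match the reference checkpoint's activations to atol=1e-4; the values are
# not meaningful on their own.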
| 447 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome ( SCREAMING_SNAKE_CASE : int | str ) -> bool:
    n = str(SCREAMING_SNAKE_CASE )
    return n == n[::-1]
def solution ( SCREAMING_SNAKE_CASE : int = 1000000 ) -> int:
    total = 0
    for i in range(1 , SCREAMING_SNAKE_CASE ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split('b' )[1] ):
            total += i
    return total
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
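    # Example: 585 is a double-base palindrome (585 == 0b1001001001, which reads the same
    # both ways), so it is counted by `solution`; this is Project Euler problem 36.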
| 447 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clipseg'''] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
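    # With the lazy module installed, `from transformers.models.clipseg import CLIPSegModel`
    # defers the heavy torch-backed import until the attribute is first accessed.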
| 707 |
from collections import namedtuple
from_to = namedtuple('''from_to''', '''from_ to''')
METRIC_CONVERSION = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.0_01, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.0_04_54, 2_64.1_72),
'''cubicyard''': from_to(0.7_64_55, 1.3_07_95),
'''cubicfoot''': from_to(0.0_28, 35.31_47),
'''cup''': from_to(0.0_00_23_65_88, 42_26.75),
}
def UpperCamelCase__ ( value , from_type , to_type ) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
            + ', '.join(METRIC_CONVERSION ) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
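    # Sketch of a conversion using the table above: 4 cubic metres to litres is
    # 4 * 1 (into m^3) * 1000 (out of m^3) = 4000.0.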
| 699 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('''sample_euler''')
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('''sample_euler''')
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt] , generator=generator , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        '''simple docstring'''
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        sd_pipe.set_scheduler('''sample_dpmpp_2m''')
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt] , generator=generator , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=True , )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
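        # `sample_euler` and `sample_dpmpp_2m` are k-diffusion sampler names accepted by
        # `set_scheduler`; `use_karras_sigmas=True` switches to the Karras et al. noise spacing.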
| 132 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _SCREAMING_SNAKE_CASE ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''BridgeTowerImageProcessor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__(self , image_processor , tokenizer):
        '''simple docstring'''
        super().__init__(image_processor , tokenizer)
    def __call__(self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode(self , *args , **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
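    # `model_input_names` merges the tokenizer's and image processor's expected keys,
    # de-duplicating while preserving order, so callers know every tensor the model takes.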
| 132 | 1 |
from __future__ import annotations
def two_pointer ( nums: list[int] , target: int ) -> list[int]:
    """simple docstring"""
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
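# Note: the two-pointer scan above assumes `nums` is sorted in ascending order; for
# unsorted input, a hash-map lookup (O(n) time, O(n) extra space) is the usual alternative.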
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 546 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration ( func ):
    """simple docstring"""
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples ( features: dict , num_examples=100 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = """The small grey turtle was surprisingly fast when challenged."""
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset ( dataset_path , features , num_examples=100 , seq_shapes=None ):
    """simple docstring"""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ) )
    return dataset
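# Usage sketch (the feature spec and path are illustrative, not from this file):
#   features = datasets.Features({"text": datasets.Value("string")})
#   ds = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
#   print(ds.num_rows)  # -> 10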
| 546 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser ( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("""tpu-config""" , description=_description )
    else:
        parser = argparse.ArgumentParser("""Accelerate tpu-config command""" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        """Config Arguments""" , """Arguments that can be configured through `accelerate config`.""" )
    config_args.add_argument(
        """--config_file""" , type=str , default=None , help="""Path to the config file to use for accelerate.""" , )
    config_args.add_argument(
        """--tpu_name""" , default=None , help="""The name of the TPU to use. If not specified, will use the TPU specified in the config file.""" , )
    config_args.add_argument(
        """--tpu_zone""" , default=None , help="""The zone of the TPU to use. If not specified, will use the zone specified in the config file.""" , )
    pod_args = parser.add_argument_group("""TPU Arguments""" , """Arguments for options ran inside the TPU.""" )
    pod_args.add_argument(
        """--use_alpha""" , action="""store_true""" , help="""Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.""" , )
    pod_args.add_argument(
        """--command_file""" , default=None , help="""The path to the file containing the commands to run on the pod on startup.""" , )
    pod_args.add_argument(
        """--command""" , action="""append""" , nargs="""+""" , help="""A command to run on the pod. Can be passed multiple times.""" , )
    pod_args.add_argument(
        """--install_accelerate""" , action="""store_true""" , help="""Whether to install accelerate on the pod. Defaults to False.""" , )
    pod_args.add_argument(
        """--accelerate_version""" , default="""latest""" , help="""The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.""" , )
    pod_args.add_argument(
        """--debug""" , action="""store_true""" , help="""If set, will print the command that would be run instead of running it.""" )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher ( args ):
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = """git+https://github.com/huggingface/accelerate.git"""
    elif args.accelerate_version == "latest":
        args.accelerate_version = """accelerate -U"""
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("""You must specify either a command file or a command to run on the pod.""" )
    if args.command_file:
        with open(args.command_file , """r""" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["""cd /usr/share"""]
    if args.install_accelerate:
        new_cmd += [F"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = """; """.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["""gcloud"""]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print("""Successfully setup pod.""" )
def main ( ):
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
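# Example invocation (the TPU name, zone, and command are illustrative):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install tensorboard" --install_accelerate --debug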
| 65 |
from importlib import import_module
from .logging import get_logger
_lowerCAmelCase: str = get_logger(__name__)
class _PatchedModuleObj:
    def __init__( self , module , attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith('__'):
                    setattr(self , key , getattr(module , key))
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj) else module
class patch_submodule:
    _active_patches = []
    def __init__( self , obj , target , new , attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split('.')[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self):
        *submodules , target_attr = self.target.split('.')
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module('.'.join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs))
                    patched = getattr(self.obj , attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None) , attrs=self.attrs))
                        patched = getattr(patched , key)
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new)
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules: # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module('.'.join(submodules)) , target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr) is attr_value:
                    self.original[attr] = getattr(self.obj , attr)
                    setattr(self.obj , attr , self.new)
        elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
            self.original[target_attr] = globals()['__builtins__'][target_attr]
            setattr(self.obj , target_attr , self.new)
        else:
            raise RuntimeError(F"""Tried to patch attribute {target_attr} instead of a submodule.""")
    def __exit__( self , *exc_info):
        for attr in list(self.original):
            setattr(self.obj , attr , self.original.pop(attr))
    def start( self):
        self.__enter__()
        self._active_patches.append(self)
    def stop( self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
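# Usage sketch (`my_module` and `fake_join` are illustrative names): temporarily reroute
# `os.path.join` as seen from inside `my_module`, including `from os.path import join`
# style aliases:
#   with patch_submodule(my_module, "os.path.join", fake_join):
#       my_module.build_path("a", "b")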
| 20 | 0 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class lowercase ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        images = kwargs.pop("""images""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor ( self ):
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson ( self , tokens , is_inner_value=False , added_vocab=None ):
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"""<s_(.*?)>""" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(rf'</s_{key}>' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token , """""" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(f'{start_token_escaped}(.*?){end_token_escaped}' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content: # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else: # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"""<sep/>""" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2] # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>": # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=is_inner_value , added_vocab=added_vocab )
        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class ( self ):
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self ):
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
| 393 |
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( voltage : float , current : float , resistance : float ):
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if resistance < 0:
raise ValueError("""Resistance cannot be negative""" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 393 | 1 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb ( x ):
    """simple docstring"""
    return int(x / 2**20 )
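# `bamb` converts a raw byte count into whole mebibytes (2**20 bytes) for the
# human-readable memory reports printed during training.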
class TorchTracemalloc:
    '''simple docstring'''
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders ( accelerator: Accelerator , batch_size: int = 16 , model_name: str = "bert-base-cased" , n_train: int = 320 , n_val: int = 160 , ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        "glue" , "mrpc" , split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding="max_length" , max_length=128 , return_tensors="pt" )
        return tokenizer.pad(examples , padding="longest" , return_tensors="pt" )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function ( config , args ):
    """simple docstring"""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
            json.dump(train_total_peak_memory , f )
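# Peak-memory accounting happens once per epoch: everything allocated inside the
# `TorchTracemalloc` context above is attributed to that epoch's training loop.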
def main ( ):
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
    parser.add_argument(
        "--model_name_or_path" , type=str , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=False , )
    parser.add_argument(
        "--output_dir" , type=str , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
    parser.add_argument(
        "--peak_memory_upper_bound" , type=float , default=None , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
    parser.add_argument(
        "--n_train" , type=int , default=320 , help="Number of training examples to use." , )
    parser.add_argument(
        "--n_val" , type=int , default=160 , help="Number of validation examples to use." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 488 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict ( filename ):
    result = {}
    with open(filename , '''r''' ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
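# The txt file is read as one label per line; the resulting dict maps line number to
# label and is used below as the `id2label` mapping for sequence classification heads.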
def UpperCAmelCase ( A : Union[str, Any] , A : Tuple , A : List[Any] , A : str , A : int ):
for attribute in key.split('''.''' ):
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(A , A )
SCREAMING_SNAKE_CASE : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
SCREAMING_SNAKE_CASE : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE : Tuple = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE : List[Any] = getattr(A , A ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE : List[str] = hf_pointer
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE : int = getattr(A , A )
SCREAMING_SNAKE_CASE : int = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE : Dict = value[0]
else:
SCREAMING_SNAKE_CASE : Optional[int] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
SCREAMING_SNAKE_CASE : str = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE : List[str] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE : Optional[int] = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE : Dict = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
SCREAMING_SNAKE_CASE : List[str] = getattr(A , A )
SCREAMING_SNAKE_CASE : Optional[Any] = value
else:
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCAmelCase ( A : Optional[Any] , A : List[str] , A : Optional[int] , A : Tuple , A : Tuple ):
SCREAMING_SNAKE_CASE : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(A ):
SCREAMING_SNAKE_CASE : List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]]
SCREAMING_SNAKE_CASE : List[Any] = '''param'''
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE : str = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE : Tuple = '''.'''.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = key
SCREAMING_SNAKE_CASE : int = value if '''lm_head''' in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def UpperCAmelCase ( A : List[Any] , A : Tuple , A : Dict=None , A : Optional[int]=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE : Optional[Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
SCREAMING_SNAKE_CASE : Optional[int] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE : int = name.split(A )[0].split('''.''' )[-2]
SCREAMING_SNAKE_CASE : Union[str, Any] = mapped_key.replace('''*''' , A )
if "weight_g" in name:
SCREAMING_SNAKE_CASE : List[Any] = '''weight_g'''
elif "weight_v" in name:
SCREAMING_SNAKE_CASE : int = '''weight_v'''
elif "bias" in name:
SCREAMING_SNAKE_CASE : str = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE : Optional[Any] = '''weight'''
else:
SCREAMING_SNAKE_CASE : Optional[int] = None
if hf_dict is not None:
rename_dict(A , A , A , A , A )
else:
set_recursively(A , A , A , A , A )
return is_used
return is_used
def UpperCAmelCase ( A : Optional[int] , A : Any , A : int ):
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Any = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE : List[str] = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE : str = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
SCREAMING_SNAKE_CASE : List[str] = True
else:
SCREAMING_SNAKE_CASE : Optional[Any] = load_wavaveca_layer(A , A , A )
if not is_used:
unused_weights.append(A )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase ( A : Tuple , A : int , A : Tuple , A : int , A : Union[str, Any] ):
SCREAMING_SNAKE_CASE : str = full_name.split('''conv_layers.''' )[-1]
SCREAMING_SNAKE_CASE : Optional[Any] = name.split('''.''' )
SCREAMING_SNAKE_CASE : Tuple = int(items[0] )
SCREAMING_SNAKE_CASE : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
SCREAMING_SNAKE_CASE : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
SCREAMING_SNAKE_CASE : Tuple = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(A )
@torch.no_grad()
def UpperCAmelCase ( A : Any , A : Union[str, Any] , A : Optional[Any]=None , A : Any=None , A : Any=True , A : str=False ):
if config_path is not None:
SCREAMING_SNAKE_CASE : List[Any] = WavaVecaConfig.from_pretrained(A )
else:
SCREAMING_SNAKE_CASE : Any = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE : List[Any] = read_txt_into_dict(A )
SCREAMING_SNAKE_CASE : Optional[Any] = idalabel
SCREAMING_SNAKE_CASE : Tuple = WavaVecaForSequenceClassification(A )
SCREAMING_SNAKE_CASE : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
feature_extractor.save_pretrained(A )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE : Optional[Any] = Dictionary.load(A )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE : List[str] = target_dict.pad_index
SCREAMING_SNAKE_CASE : Optional[Any] = target_dict.bos_index
SCREAMING_SNAKE_CASE : Dict = target_dict.eos_index
SCREAMING_SNAKE_CASE : Dict = len(target_dict.symbols )
SCREAMING_SNAKE_CASE : str = os.path.join(A , '''vocab.json''' )
if not os.path.isdir(A ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(A ) )
return
os.makedirs(A , exist_ok=A )
SCREAMING_SNAKE_CASE : Optional[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : Tuple = 1
with open(A , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(A , A )
SCREAMING_SNAKE_CASE : Any = WavaVecaCTCTokenizer(
A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=A , )
SCREAMING_SNAKE_CASE : Optional[Any] = True if config.feat_extract_norm == '''layer''' else False
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
SCREAMING_SNAKE_CASE : Any = WavaVecaProcessor(feature_extractor=A , tokenizer=A )
processor.save_pretrained(A )
SCREAMING_SNAKE_CASE : List[str] = WavaVecaForCTC(A )
else:
SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaForPreTraining(A )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE : str = argparse.Namespace(task='''audio_pretraining''' )
SCREAMING_SNAKE_CASE : Dict = fairseq.tasks.setup_task(A )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A )
SCREAMING_SNAKE_CASE : List[Any] = model[0].eval()
recursively_load_weights(A , A , not is_finetuned )
hf_wavavec.save_pretrained(A )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 527 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
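# Minimal sketch of what the _LazyModule wiring above buys: importing the
# package is cheap, because the submodules listed in _import_structure are
# loaded only on first attribute access.
#
#   >>> import transformers
#   >>> vit_cls = transformers.ViTModel  # modeling_vit is imported on this access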
| 333 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0):
    """Return the value of the Gaussian probability density at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
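# Example value for the density above: with the defaults mu=0.0, sigma=1.0,
# the peak at x=0 is 1/sqrt(2*pi).
#
#   >>> round(float(gaussian(0)), 4)
#   0.3989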
| 333 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ = logging.get_logger(__name__)
A_ = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"
    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
                 pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
                 classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
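# Usage sketch for the configuration above:
#
#   >>> config = CamembertConfig(vocab_size=30_522, hidden_size=768)
#   >>> config.model_type
#   'camembert'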
| 143 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
A_ = "examples/"
A_ = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A_ = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A_ = "README.md"
def A_ ( snake_case , snake_case , snake_case ):
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[str] = f.read()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE:Any = REPLACE_PATTERNS[pattern]
SCREAMING_SNAKE_CASE:Tuple = replace.replace("VERSION" , snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = re_pattern.sub(snake_case , snake_case )
with open(snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(snake_case )
def A_ ( snake_case ):
for folder, directories, fnames in os.walk(snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(snake_case , snake_case ) , snake_case , pattern="examples" )
def A_ ( snake_case , snake_case=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(snake_case , snake_case , snake_case )
if not patch:
update_version_in_examples(snake_case )
def A_ ( ):
SCREAMING_SNAKE_CASE:int = "🤗 Transformers currently provides the following architectures"
SCREAMING_SNAKE_CASE:int = "1. Want to contribute a new model?"
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
SCREAMING_SNAKE_CASE:List[Any] = f.readlines()
# Find the start of the list.
SCREAMING_SNAKE_CASE:Dict = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
SCREAMING_SNAKE_CASE:str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
SCREAMING_SNAKE_CASE:Optional[Any] = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(snake_case , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(snake_case )
def A_ ( ):
with open(REPLACE_FILES["init"] , "r" ) as f:
SCREAMING_SNAKE_CASE:str = f.read()
SCREAMING_SNAKE_CASE:Tuple = REPLACE_PATTERNS["init"][0].search(snake_case ).groups()[0]
return packaging.version.parse(snake_case )
def A_ ( snake_case=False ):
SCREAMING_SNAKE_CASE:Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
SCREAMING_SNAKE_CASE:Any = default_version.base_version
elif patch:
SCREAMING_SNAKE_CASE:str = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
SCREAMING_SNAKE_CASE:str = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
SCREAMING_SNAKE_CASE:Optional[int] = input(F'''Which version are you releasing? [{default_version}]''' )
if len(snake_case ) == 0:
SCREAMING_SNAKE_CASE:Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(snake_case , patch=snake_case )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def A_ ( ):
SCREAMING_SNAKE_CASE:int = get_version()
SCREAMING_SNAKE_CASE:int = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
SCREAMING_SNAKE_CASE:Optional[Any] = current_version.base_version
# Check with the user we got that right.
SCREAMING_SNAKE_CASE:Any = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(snake_case ) == 0:
SCREAMING_SNAKE_CASE:Union[str, Any] = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(snake_case )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
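# Hypothetical invocations (assuming the file lives at utils/release.py as in
# the upstream repo; --patch and --post_release are the flags defined above):
#
#   python utils/release.py                   # cut a release: drops .dev0 or bumps the minor
#   python utils/release.py --patch           # patch release: X.Y.Z -> X.Y.(Z+1)
#   python utils/release.py --post_release    # reopen dev: -> X.(Y+1).0.dev0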
| 143 | 1 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path) | 703 |
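# Hypothetical invocation (script name assumed; the two positional arguments
# are the ones declared in the parser above):
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted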
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> int:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = ParquetDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
"""simple docstring"""
if issubclass(lowercase_ , lowercase_ ):
A__ = parquet_path
elif issubclass(lowercase_ , lowercase_ ):
A__ = [parquet_path]
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_dataset(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_=("train",) ) -> List[str]:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
A__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = ParquetDatasetReader(
{'''train''': parquet_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
"""simple docstring"""
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = ParquetDatasetReader({'''train''': parquet_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
if split:
A__ = {split: parquet_path}
else:
A__ = '''train'''
A__ = {'''train''': parquet_path, '''test''': parquet_path}
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = ParquetDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_parquet_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[str]:
"""simple docstring"""
A__ = ParquetDatasetWriter(lowercase_ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A__ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
A__ = pf.read()
assert dataset.data.table == output_table
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
"""simple docstring"""
A__ = str(shared_datadir / '''test_image_rgb.jpg''' )
A__ = {'''image''': [image_path]}
A__ = Features({'''image''': Image()} )
A__ = Dataset.from_dict(lowercase_ , features=lowercase_ )
A__ = ParquetDatasetWriter(lowercase_ , tmp_path / '''foo.parquet''' )
assert writer.write() > 0
A__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
assert dataset.features == reloaded_dataset.features
A__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=lowercase_ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'''feature, expected''' , [
(Features({'''foo''': Value('''int32''' )} ), None),
(Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Tuple:
"""simple docstring"""
assert get_writer_batch_size(lowercase_ ) == expected
| 177 | 0 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A__ ( ) -> Dict:
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A__ ( ) -> Dict:
"""simple docstring"""
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def A__ ( ) -> List[Any]:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def A__ ( ) -> List[Any]:
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A__ ( ) -> Tuple:
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A__ ( ) -> Union[str, Any]:
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A__ ( ) -> List[Any]:
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
        pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass | 32 |
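# The pattern the tests above exercise, as a minimal sketch: patch_submodule
# temporarily rebinds one dotted attribute inside the target module and
# restores the original on exit.
#
#   >>> sentinel = object()
#   >>> with patch_submodule(_test_patching, "os.path.join", sentinel):
#   ...     assert _test_patching.os.path.join is sentinel
#   >>> import os.path
#   >>> assert _test_patching.os.path.join is os.path.join  # restored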
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    def __init__(self, vocab_size=65024, hidden_size=4544, num_hidden_layers=32, num_attention_heads=71,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0,
                 attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False,
                 multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
    def head_dim(self):
return self.hidden_size // self.num_attention_heads
@property
    def rotary(self):
return not self.alibi | 32 | 1 |
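# Small usage sketch for the config above (hidden_size and num_attention_heads
# here are the defaults from the signature; any values work):
#
#   >>> cfg = FalconConfig(hidden_size=4544, num_attention_heads=71)
#   >>> cfg.head_dim == 4544 // 71
#   True
#   >>> cfg.rotary == (not cfg.alibi)
#   True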
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class lowerCamelCase :
def __init__( self : str , __snake_case : Optional[int] , __snake_case : Tuple=13 , __snake_case : Optional[Any]=7 , __snake_case : List[Any]=True , __snake_case : Any=True , __snake_case : int=True , __snake_case : Dict=True , __snake_case : int=99 , __snake_case : str=32 , __snake_case : Any=2 , __snake_case : Optional[Any]=4 , __snake_case : Dict=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Dict=0.1 , __snake_case : Any=0.1 , __snake_case : Optional[Any]=5_12 , __snake_case : List[str]=16 , __snake_case : Dict=2 , __snake_case : str=0.02 , __snake_case : Tuple=3 , __snake_case : Union[str, Any]=4 , __snake_case : Tuple=None , __snake_case : Optional[Any]=0 , ):
'''simple docstring'''
_snake_case: Optional[Any] = parent
_snake_case: Any = batch_size
_snake_case: Optional[int] = seq_length
_snake_case: Any = is_training
_snake_case: Any = use_input_mask
_snake_case: Union[str, Any] = use_token_type_ids
_snake_case: Dict = use_labels
_snake_case: List[Any] = vocab_size
_snake_case: Union[str, Any] = hidden_size
_snake_case: Optional[Any] = num_hidden_layers
_snake_case: Tuple = num_attention_heads
_snake_case: Dict = intermediate_size
_snake_case: Tuple = hidden_act
_snake_case: List[Any] = hidden_dropout_prob
_snake_case: List[Any] = attention_probs_dropout_prob
_snake_case: List[Any] = max_position_embeddings
_snake_case: Union[str, Any] = type_vocab_size
_snake_case: Any = type_sequence_label_size
_snake_case: str = initializer_range
_snake_case: List[str] = num_labels
_snake_case: List[str] = num_choices
_snake_case: List[Any] = scope
_snake_case: int = projection_dim
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case: Dict = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_snake_case: Any = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case: Tuple = None
if self.use_token_type_ids:
_snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case: Optional[Any] = None
_snake_case: str = None
_snake_case: Tuple = None
if self.use_labels:
_snake_case: List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case: List[str] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case: Optional[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , )
_snake_case: List[Any] = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] , __snake_case : int ):
'''simple docstring'''
_snake_case: Union[str, Any] = TFDPRContextEncoder(config=lowercase_ )
_snake_case: Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
_snake_case: str = model(lowercase_ , token_type_ids=lowercase_ )
_snake_case: int = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , __snake_case : List[str] , __snake_case : Any , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
_snake_case: Dict = TFDPRQuestionEncoder(config=lowercase_ )
_snake_case: Optional[int] = model(lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
_snake_case: Optional[int] = model(lowercase_ , token_type_ids=lowercase_ )
_snake_case: List[str] = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : int , __snake_case : List[Any] ):
'''simple docstring'''
_snake_case: Optional[int] = TFDPRReader(config=lowercase_ )
_snake_case: Any = model(lowercase_ , attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
_SCREAMING_SNAKE_CASE = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: Any = TFDPRModelTester(self )
_snake_case: Tuple = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: List[Any] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: List[str] = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: Union[str, Any] = TFDPRQuestionEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case: Tuple = TFDPRReader.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: Any = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_snake_case: Dict = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] ) # [CLS] hello, is my dog cute? [SEP]
_snake_case: List[Any] = model(lowercase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_snake_case: str = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 704 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F'{solution() = }')
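# Worked check of the recurrence above: different_colour_ways_number[n][t - 2]
# counts rows of length n that use at least one tile of length t (t = 2, 3, 4,
# one colour per tile length), built by fixing where the first tile starts.
# By hand, for length 5: length-2 tiles give 7 rows, length-3 give 3, and
# length-4 give 2, so:
#
#   >>> solution(5)
#   12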
| 273 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ) -> List[str]:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : int = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = OPTConfig
SCREAMING_SNAKE_CASE__ :Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ :List[Any] = 'gelu'
def __init__( self : List[Any] , __a : List[Any] , __a : Dict=13 , __a : Optional[Any]=7 , __a : str=True , __a : Any=False , __a : int=99 , __a : Optional[int]=16 , __a : int=2 , __a : Dict=4 , __a : List[Any]=4 , __a : Union[str, Any]="gelu" , __a : Any=0.1 , __a : int=0.1 , __a : Optional[int]=20 , __a : int=2 , __a : Tuple=1 , __a : str=0 , __a : Dict=16 , __a : List[str]=16 , ) -> Union[str, Any]:
_UpperCamelCase : Union[str, Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : List[str] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : str = use_labels
_UpperCamelCase : Union[str, Any] = vocab_size
_UpperCamelCase : Union[str, Any] = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : List[str] = num_attention_heads
_UpperCamelCase : List[str] = intermediate_size
_UpperCamelCase : str = hidden_act
_UpperCamelCase : List[str] = hidden_dropout_prob
_UpperCamelCase : List[str] = attention_probs_dropout_prob
_UpperCamelCase : Optional[Any] = max_position_embeddings
_UpperCamelCase : Optional[Any] = eos_token_id
_UpperCamelCase : Optional[Any] = pad_token_id
_UpperCamelCase : Optional[int] = bos_token_id
_UpperCamelCase : List[str] = embed_dim
_UpperCamelCase : List[Any] = word_embed_proj_dim
_UpperCamelCase : List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
_UpperCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case_ , **self.config_updates , )
_UpperCamelCase : Optional[Any] = prepare_opt_inputs_dict(snake_case_ , snake_case_ )
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : Any , __a : Optional[int] ) -> List[Any]:
_UpperCamelCase : int = TFOPTModel(config=snake_case_ )
_UpperCamelCase : str = inputs_dict["input_ids"]
_UpperCamelCase : Optional[int] = input_ids[:1, :]
_UpperCamelCase : Dict = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : Any = 1
# first forward pass
_UpperCamelCase : str = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
_UpperCamelCase, _UpperCamelCase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : str = model(snake_case_ , attention_mask=snake_case_ )[0]
_UpperCamelCase : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case_ , snake_case_ , rtol=1e-3 )
@require_tf
class __SCREAMING_SNAKE_CASE ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :Tuple = (TFOPTForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE__ :List[str] = False
SCREAMING_SNAKE_CASE__ :List[str] = False
SCREAMING_SNAKE_CASE__ :List[Any] = False
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 10
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase : List[str] = TFOPTModelTester(self )
_UpperCamelCase : List[Any] = ConfigTester(self , config_class=snake_case_ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
_UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case_ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
_UpperCamelCase, _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(__a : Optional[Any] , __a : Optional[int] ):
if hasattr(snake_case_ , "weight" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(snake_case_ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_UpperCamelCase : Optional[Any] = model_class(config=snake_case_ )
_UpperCamelCase : Union[str, Any] = _get_word_embedding_weight(snake_case_ , model.get_input_embeddings() )
_UpperCamelCase : Optional[int] = _get_word_embedding_weight(snake_case_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case_ )
_UpperCamelCase : List[str] = _get_word_embedding_weight(snake_case_ , model.get_input_embeddings() )
_UpperCamelCase : Optional[int] = _get_word_embedding_weight(snake_case_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_UpperCamelCase : List[str] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case_ )
# check that weights remain the same after resizing
_UpperCamelCase : Union[str, Any] = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCamelCase : Union[str, Any] = False
self.assertTrue(snake_case_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case_ )
_UpperCamelCase : str = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCamelCase : List[str] = False
self.assertTrue(snake_case_ )
def lowercase__ ( lowercase_ ) -> Any:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[Any] = 99
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase : Optional[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_UpperCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_UpperCamelCase : Optional[Any] = input_ids.shape[0]
_UpperCamelCase : Union[str, Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Any = TFOPTModel.from_pretrained("facebook/opt-350m" )
_UpperCamelCase : str = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase : int = tf.not_equal(snake_case_ , model.config.pad_token_id )
with tf.GradientTape():
_UpperCamelCase : Union[str, Any] = model(input_ids=snake_case_ , attention_mask=snake_case_ ).last_hidden_state
_UpperCamelCase : Optional[int] = (1, 11, 512)
self.assertEqual(output.shape , snake_case_ )
_UpperCamelCase : str = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case_ , atol=4e-3 ) )
_UpperCamelCase : int = tf.function(snake_case_ , jit_compile=snake_case_ )
_UpperCamelCase : int = xla_generate(snake_case_ , snake_case_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case_ , atol=4e-2 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
super().setUp()
_UpperCamelCase : List[str] = "facebook/opt-350m"
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
_UpperCamelCase : str = TFOPTForCausalLM.from_pretrained(self.path_model )
_UpperCamelCase : Any = GPTaTokenizer.from_pretrained(self.path_model )
_UpperCamelCase : Union[str, Any] = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_UpperCamelCase : int = tokenizer(snake_case_ , return_tensors="tf" , padding=snake_case_ , add_special_tokens=snake_case_ )
_UpperCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_UpperCamelCase : str = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-4 ) )
_UpperCamelCase : List[str] = tf.function(snake_case_ , jit_compile=snake_case_ )
_UpperCamelCase : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-4 ) )
@require_tf
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@property
def __SCREAMING_SNAKE_CASE ( self : Any ) -> int:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
_UpperCamelCase : List[Any] = "facebook/opt-125m"
_UpperCamelCase : List[Any] = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_UpperCamelCase : str = []
_UpperCamelCase : List[str] = GPTaTokenizer.from_pretrained(snake_case_ )
_UpperCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(snake_case_ )
for prompt in self.prompts:
_UpperCamelCase : int = tokenizer(snake_case_ , return_tensors="tf" ).input_ids
_UpperCamelCase : Any = model.generate(snake_case_ , max_length=10 )
_UpperCamelCase : int = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
predicted_outputs += generated_string
self.assertListEqual(snake_case_ , snake_case_ )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Optional[int] = "facebook/opt-350m"
_UpperCamelCase : List[str] = GPTaTokenizer.from_pretrained(snake_case_ )
_UpperCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(snake_case_ )
_UpperCamelCase : Optional[int] = "left"
# use different length sentences to test batching
_UpperCamelCase : Optional[int] = [
"Hello, my dog is a little",
"Today, I",
]
_UpperCamelCase : Optional[Any] = tokenizer(snake_case_ , return_tensors="tf" , padding=snake_case_ )
_UpperCamelCase : str = inputs["input_ids"]
_UpperCamelCase : Any = model.generate(input_ids=snake_case_ , attention_mask=inputs["attention_mask"] )
_UpperCamelCase : Any = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
_UpperCamelCase : Optional[Any] = model.generate(input_ids=snake_case_ )
_UpperCamelCase : List[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
_UpperCamelCase : List[Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
_UpperCamelCase : List[str] = model.generate(input_ids=snake_case_ , max_length=model.config.max_length - num_paddings )
_UpperCamelCase : Optional[int] = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
_UpperCamelCase : int = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case_ )
_UpperCamelCase : int = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case_ )
_UpperCamelCase : str = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , [non_padded_sentence, padded_sentence] )
def __SCREAMING_SNAKE_CASE ( self : int ) -> int:
_UpperCamelCase : List[str] = "facebook/opt-350m"
_UpperCamelCase : Optional[int] = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(snake_case_ )
_UpperCamelCase : int = TFOPTForCausalLM.from_pretrained(snake_case_ )
for prompt in self.prompts:
_UpperCamelCase : Union[str, Any] = tokenizer(snake_case_ , return_tensors="tf" ).input_ids
_UpperCamelCase : Tuple = model.generate(snake_case_ , max_length=10 )
_UpperCamelCase : Optional[Any] = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
predicted_outputs += generated_string
self.assertListEqual(snake_case_ , snake_case_ )
| 624 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
_SCREAMING_SNAKE_CASE = """
import os
"""
_SCREAMING_SNAKE_CASE = """
def foo():
import os
return False
"""
_SCREAMING_SNAKE_CASE = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
except:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
_SCREAMING_SNAKE_CASE = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" , SCREAMING_SNAKE_CASE )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
__snake_case = os.path.join(SCREAMING_SNAKE_CASE , "test_file.py" )
with open(SCREAMING_SNAKE_CASE , "w" ) as _tmp_file:
_tmp_file.write(SCREAMING_SNAKE_CASE )
__snake_case = get_imports(SCREAMING_SNAKE_CASE )
assert parsed_imports == ["os"]
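# What the assertion above pins down: for every case in CASES, get_imports
# reports only the unconditional top-level dependency, because imports inside
# functions or try/except blocks (like `bar` and `baz`) are treated as
# optional and skipped.
#
#   >>> # after writing, e.g., MULTILINE_TRY_IMPORT to tmp_file_path:
#   >>> get_imports(tmp_file_path)
#   ['os']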
| 163 | 0 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
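# The four "curious" digit-cancelling fractions with two-digit numerators and
# denominators, and the denominator of their product in lowest terms
# (Project Euler 33):
#
#   >>> fraction_list(2)
#   ['16/64', '19/95', '26/65', '49/98']
#   >>> solution()
#   100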
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character's handler result if it exists; otherwise None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
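# Usage sketch (assuming the upstream names restored above): `register` swaps
# in the KeyHandler metaclass so that methods tagged with @mark become key
# handlers dispatched by handle_input.
#
#   >>> @register
#   ... class Menu:
#   ...     @mark(KEYMAP["up"])
#   ...     def scroll_up(cls):
#   ...         return "scrolled up"
#   >>> # Menu.handle_input() now routes an "up" keypress to scroll_up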
| 269 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    # Save max(src_len, tgt_len) per example so the dataset can do dynamic batching.
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
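# Hypothetical invocation via fire (the module file name is an assumption; the
# positional arguments map onto save_len_file's signature above):
#
#   python save_len_file.py t5-small /path/to/data_dir --max_source_length 1024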
| 13 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' , do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def __getstate__( self ):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
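# --- minimal usage sketch (illustrative; network access assumed for the hub download) ---
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.tokenize("今天天气非常好。")  # pre-tokenized with jieba after loading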
| 13 | 1 |
'''simple docstring'''
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__( self ):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str )

    def __repr__( self ):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple( self ):
        return self.major, self.minor, self.patch

    def _validate_operand( self , other ):
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f"{other} (type {type(other )}) cannot be compared to version." )

    def __eq__( self , other ):
        try:
            other = self._validate_operand(other )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self , other ):
        other = self._validate_operand(other )
        return self.tuple < other.tuple

    def __hash__( self ):
        return hash(_version_tuple_to_str(self.tuple ) )

    @classmethod
    def from_dict( cls , dic ):
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )

    def _to_yaml_string( self ):
        return self.version_str


def _str_to_version_tuple( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )


def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 718 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"enhancement",
"new pipeline/model",
"new scheduler",
"wip",
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )

    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
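# --- deployment note (assumption, not from the original script) ---
# This script is designed for a scheduled runner (e.g. a daily GitHub Actions
# cron job) with GITHUB_TOKEN exported; running it ad hoc works but re-scans
# every open issue on each invocation.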
| 490 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()

        return config, pixel_values, labels

    def get_config( self ):
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False

    def setUp( self ):
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )

    def test_config( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties( self ):
        return

    @unittest.skip(reason='''ResNet does not use inputs_embeds''' )
    def test_inputs_embeds( self ):
        pass

    @unittest.skip(reason='''ResNet does not support input and output embeddings''' )
    def test_model_common_attributes( self ):
        pass

    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['''basic''', '''bottleneck''']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['''output_hidden_states'''] = True
                check_hidden_states_output(inputs_dict , config , model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict , config , model_class )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''tf''' )

        # forward pass
        outputs = model(**inputs )

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
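# --- run note (illustrative; the test-file path is an assumption) ---
# pytest tests/models/resnet/test_modeling_tf_resnet.py -k "integration"
# The @slow integration test additionally requires RUN_SLOW=1 in the environment.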
| 466 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 314 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig( PretrainedConfig ):
    model_type = """data2vec-vision"""

    def __init__( self , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_24 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_55 , **kwargs , ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation( self ):
        return 1E-4
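# --- illustrative instantiation (defaults mirror the base architecture) ---
# config = Data2VecVisionConfig(image_size=384, drop_path_rate=0.2)
# config.num_hidden_layers  # -> 12, unchanged default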
| 92 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    d = torch.load(checkpoint_path )
    d[NEW_KEY] = d.pop(OLD_KEY )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    torch.save(d , os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME ) )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"""{MODEL}_ft.pkl""")
        pytorch_dump_folder_path = f"""./DialoGPT-{MODEL}"""
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 92 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device('''cpu''')


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im


def get_expected_output( swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val


def create_rename_keys( state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint( swiftformer_name , pytorch_dump_folder_path , original_ckpt ):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]

    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]

    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]

    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1E-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
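# --- example invocation (script filename and checkpoint path/URL are placeholders) ---
# python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#     --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt <path-or-url>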
| 40 |
def stooge_sort( arr ):
    stooge(arr , 0 , len(arr ) - 1 )
    return arr


def stooge( arr , i , h ):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (int)((h - i + 1) / 3 )

        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )

        # Recursively sort last 2/3 elements
        stooge(arr , i + t , (h) )

        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted)) | 45 | 0 |
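# --- complexity note (illustrative) ---
# Stooge sort follows T(n) = 3 T(2n/3) + O(1), i.e. O(n^(log 3 / log 1.5)) ≈ O(n^2.71),
# so it is a teaching curiosity rather than a practical sort:
# stooge_sort([2, 4, 5, 3, 1]) -> [1, 2, 3, 4, 5]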
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings( monkeypatch ):
    monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )


@pytest.fixture
def mock_hfh( monkeypatch ):
    class MetricMock:
        def __init__( self , metric_id ):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics( self ):
            return self._metrics

    monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )


@pytest.mark.parametrize(
    """func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def test_metric_deprecation_warning( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    if "tmp_path" in args:
        args = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="""https://huggingface.co/docs/evaluate""" ):
        func(*args )
| 507 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key , ((batch_size, num_channels) + sizes) )

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            """block_out_channels""": [32, 64],
            """in_channels""": 3,
            """out_channels""": 3,
            """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
            """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
            """latent_channels""": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 507 | 1 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )


class A__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )


class A__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )


class A__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']

    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''flax''', '''transformers'''] )
| 550 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
),
},
"merges_file": {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
"allenai/longformer-large-4096": (
"https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"allenai/longformer-base-4096": 40_96,
"allenai/longformer-large-4096": 40_96,
"allenai/longformer-large-4096-finetuned-triviaqa": 40_96,
"allenai/longformer-base-4096-extra.pos.embd.only": 40_96,
"allenai/longformer-large-4096-extra.pos.embd.only": 40_96,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )


def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LongformerTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )

        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''' )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    def vocab_size( self ):
        return len(self.encoder )

    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe( self , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )

        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = ''' '''.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(''' ''' ) )
        return bpe_tokens

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        text = ''''''.join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )

        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
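# --- worked example (illustrative, as comments) ---
# With bpe_ranks containing ("l", "o") and then ("lo", "w"), bpe("low") merges
# ("l", "o", "w") -> ("lo", "w") -> ("low",): the lowest-ranked known pair is
# merged repeatedly until no adjacent pair remains in bpe_ranks.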
| 550 | 1 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)


def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default='toto' , metadata={'help': 'help message'} )


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum( Enum ):
    titi = 'titi'
    toto = 'toto'


class MixedTypeEnum( Enum ):
    titi = 'titi'
    toto = 'toto'
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__( self ):
        self.foo = BasicEnum(self.foo )


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__( self ):
        self.foo = MixedTypeEnum(self.foo )


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None , metadata={'help': 'help message'} )
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[] )
    des: Optional[List[int]] = list_field(default=[] )


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[] )
    bar_int: List[int] = list_field(default=[1, 2, 3] )
    foo_str: List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3] )


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__( self ):
        self.required_enum = BasicEnum(self.required_enum )


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default='toto' , metadata={'help': 'help message'} )
    foo_str: "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None , metadata={'help': 'help message'} )
        baz: str | None = None
        ces: list[str] | None = list_field(default=[] )
        des: list[int] | None = list_field(default=[] )
class HfArgumentParserTest( unittest.TestCase ):
    def argparsersEqual( self , a: argparse.ArgumentParser , b: argparse.ArgumentParser ):
        self.assertEqual(len(a._actions ) , len(b._actions ) )
        for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x ).items() if k != "container"}
            yy = {k: v for k, v in vars(y ).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices" , None ) and yy.get("choices" , None ):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice ) , yy["type"](expected_choice ) )
                del xx["type"], yy["type"]

            self.assertEqual(xx , yy )

    def test_basic( self ):
        parser = HfArgumentParser(BasicExample )

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=int , required=True )
        expected.add_argument("--bar" , type=float , required=True )
        expected.add_argument("--baz" , type=str , required=True )
        expected.add_argument("--flag" , type=string_to_bool , default=False , const=True , nargs="?" )
        self.argparsersEqual(parser , expected )

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args , look_for_args_file=False )
        self.assertFalse(example.flag )
    def test_with_default( self ):
        parser = HfArgumentParser(WithDefaultExample )

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=4_2 , type=int )
        expected.add_argument("--baz" , default="toto" , type=str , help="help message" )
        self.argparsersEqual(parser , expected )

    def test_with_default_bool( self ):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=string_to_bool , default=False , const=True , nargs="?" )
        expected.add_argument("--baz" , type=string_to_bool , default=True , const=True , nargs="?" )
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz" , action="store_false" , default=False , dest="baz" )
        expected.add_argument("--opt" , type=string_to_bool , default=None )

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604 )

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )
            self.argparsersEqual(parser , expected )

            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=False , baz=True , opt=None ) )

            args = parser.parse_args(["--foo", "--no_baz"] )
            self.assertEqual(args , Namespace(foo=True , baz=False , opt=None ) )

            args = parser.parse_args(["--foo", "--baz"] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=None ) )

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
            self.assertEqual(args , Namespace(foo=True , baz=True , opt=True ) )

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
            self.assertEqual(args , Namespace(foo=False , baz=False , opt=False ) )

    def test_with_enum( self ):
        parser = HfArgumentParser(MixedTypeEnumExample )

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=["titi", "toto", 4_2] , type=make_choice_type_function(["titi", "toto", 4_2] ) , )
        self.argparsersEqual(parser , expected )

        args = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )
        enum_ex = parser.parse_args_into_dataclasses([] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )

        args = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )

        args = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 4_2 )
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
        self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )

    def test_with_literal( self ):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 4_2] = "toto"

        parser = HfArgumentParser(LiteralExample )

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo" , default="toto" , choices=("titi", "toto", 4_2) , type=make_choice_type_function(["titi", "toto", 4_2] ) , )
        self.argparsersEqual(parser , expected )

        args = parser.parse_args([] )
        self.assertEqual(args.foo , "toto" )

        args = parser.parse_args(["--foo", "titi"] )
        self.assertEqual(args.foo , "titi" )

        args = parser.parse_args(["--foo", "42"] )
        self.assertEqual(args.foo , 4_2 )

    def test_with_list( self ):
        parser = HfArgumentParser(ListExample )

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int" , nargs="+" , default=[] , type=int )
        expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=int )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=str )
        expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=float )
        self.argparsersEqual(parser , expected )

        args = parser.parse_args([] )
        self.assertEqual(
            args , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
        self.assertEqual(args , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
    def test_with_optional( self ):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , default=None , type=int )
        expected.add_argument("--bar" , default=None , type=float , help="help message" )
        expected.add_argument("--baz" , default=None , type=str )
        expected.add_argument("--ces" , nargs="+" , default=[] , type=str )
        expected.add_argument("--des" , nargs="+" , default=[] , type=int )

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604 )

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type )

            self.argparsersEqual(parser , expected )

            args = parser.parse_args([] )
            self.assertEqual(args , Namespace(foo=None , bar=None , baz=None , ces=[] , des=[] ) )

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
            self.assertEqual(args , Namespace(foo=1_2 , bar=3.1_4 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )

    def test_with_required( self ):
        parser = HfArgumentParser(RequiredExample )

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list" , nargs="+" , type=int , required=True )
        expected.add_argument("--required_str" , type=str , required=True )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=True , )
        self.argparsersEqual(parser , expected )

    def test_with_string_literal_annotation( self ):
        parser = HfArgumentParser(StringLiteralAnnotationExample )

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo" , type=int , required=True )
        expected.add_argument(
            "--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=True , )
        expected.add_argument("--opt" , type=string_to_bool , default=None )
        expected.add_argument("--baz" , default="toto" , type=str , help="help message" )
        expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=str )
        self.argparsersEqual(parser , expected )

    def test_parse_dict( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            "foo": 1_2,
            "bar": 3.1_4,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict )[0]
        args = BasicExample(**args_dict )
        self.assertEqual(parsed_args , args )

    def test_parse_dict_extra_key( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict = {
            "foo": 1_2,
            "bar": 3.1_4,
            "baz": "42",
            "flag": True,
            "extra": 4_2,
        }

        self.assertRaises(ValueError , parser.parse_dict , args_dict , allow_extra_keys=False )

    def test_parse_json( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_json = {
            "foo": 1_2,
            "bar": 3.1_4,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , "temp_json" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + ".json" , "w+" ) as f:
                json.dump(args_dict_for_json , f )
            # YAML is a superset of JSON, so parse_yaml_file also reads .json files
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]

        args = BasicExample(**args_dict_for_json )
        self.assertEqual(parsed_args , args )

    def test_parse_yaml( self ):
        parser = HfArgumentParser(BasicExample )
        args_dict_for_yaml = {
            "foo": 1_2,
            "bar": 3.1_4,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir , "temp_yaml" )
            os.mkdir(temp_local_path )
            with open(temp_local_path + ".yaml" , "w+" ) as f:
                yaml.dump(args_dict_for_yaml , f )
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
        args = BasicExample(**args_dict_for_yaml )
        self.assertEqual(parsed_args , args )

    def test_integration_training_args( self ):
        parser = HfArgumentParser(TrainingArguments )
        self.assertIsNotNone(parser )
| 715 |
'''simple docstring'''
from __future__ import annotations
class Node:
    def __init__( self , data=None ):
        self.data = data
        self.next = None

    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"""{temp.data}""" )
            temp = temp.next
        return "->".join(string_rep )


def make_linked_list( elements_list ):
    if not elements_list:
        raise Exception("The Elements List is empty" )

    current = head = Node(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head


def print_reverse( head_node ) -> None:
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(linked_list )
    print("Elements in Reverse:" )
    print_reverse(linked_list )
if __name__ == "__main__":
main()
| 602 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}


if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 13 |
'''simple docstring'''
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : Dict = set()
# Replace all the whitespace in our sentence
UpperCAmelCase__ : str = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(lowercase__ ) == 2_6
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
UpperCAmelCase__ : str = [False] * 2_6
for char in input_str:
if char.islower():
UpperCAmelCase__ : List[Any] = True
elif char.isupper():
UpperCAmelCase__ : List[Any] = True
return all(lowercase__ )
def snake_case_ ( lowercase__ = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def snake_case_ ( ):
from timeit import timeit
UpperCAmelCase__ : Union[str, Any] = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=lowercase__ ) )
print(timeit("is_pangram_faster()" , setup=lowercase__ ) )
print(timeit("is_pangram_fastest()" , setup=lowercase__ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
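# Quick sanity checks (illustrative, not part of the original module):
#   is_pangram("The quick brown fox jumps over the lazy dog")  -> True
#   is_pangram("My name is Unknown")                           -> False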
| 199 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation, truncating from the left if needed."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
return input_ids | 701 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the last index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the last mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions) | 268 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def UpperCamelCase__ ( self ) -> Tuple:
# fmt: off
A = {"""input_ids""": [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ ,model_name="""facebook/s2t-small-mustc-en-de-st""" ,revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" ,)
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result_str = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        spanish_decoded = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result_str, spanish_decoded)
        self.assertNotIn(self.tokenizer.eos_token, result_str)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 617 |
"""simple docstring"""
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
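# Illustrative alternative (not in the original file): an iterative DFS over
# the same adjacency dict (e.g. g.vertex), trading recursion for an explicit stack.
def dfs_iterative(adjacency: dict, start: int) -> None:
    visited, stack = set(), [start]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        print(vertex, end=" ")
        stack.extend(reversed(adjacency.get(vertex, [])))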
| 617 | 1 |
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (numbers whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
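# Illustrative cross-check (not in the original file): an ugly number reduces
# to 1 after stripping all factors of 2, 3 and 5.
def is_ugly_bruteforce(n: int) -> bool:
    for p in (2, 3, 5):
        while n % p == 0:
            n //= p
    return n == 1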
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(200) = }''') | 712 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace `original_name` in `key` while shifting the block index down by `offset`."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(
        f"{orig_block_num}.{layer_num}.{original_name}",
        f"block.{new_block_num}.{layer_num}.{new_name}",
    )
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    """Load the standard COCO test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
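# Illustrative effect of rename_keys on one (hypothetical) checkpoint key:
#   "network.0.0.mlp.fc1.weight" -> "poolformer.encoder.block.0.0.output.conv1.weight"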
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path) | 59 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s

SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever one of force, area or distance is passed in as 0."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
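# Worked example (illustrative): for area = 4 m^2 and distance = 1e-6 m,
# casimir_force(0, 4, 1e-6) solves F = (hbar * c * pi**2 * A) / (240 * d**4),
# which comes out to roughly 5.2e-3 N.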
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 610 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal, else 0 (logical XNOR)."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
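# Truth table for XNOR (illustrative):
#   input_1 | input_2 | output
#      0    |    0    |   1
#      0    |    1    |   0
#      1    |    0    |   0
#      1    |    1    |   1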
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 610 | 1 |
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
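# Example (illustrative): harmonic_series("5") -> ['1', '1/2', '1/3', '1/4', '1/5']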
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 710 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 533 | 0 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
__snake_case = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings
@classmethod
def lowerCamelCase_ ( cls : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : Dict="speaker_embeddings_path.json" , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
UpperCamelCase = get_file_from_repo(
__magic_name__ , __magic_name__ , subfolder=kwargs.pop("""subfolder""" , __magic_name__ ) , cache_dir=kwargs.pop("""cache_dir""" , __magic_name__ ) , force_download=kwargs.pop("""force_download""" , __magic_name__ ) , proxies=kwargs.pop("""proxies""" , __magic_name__ ) , resume_download=kwargs.pop("""resume_download""" , __magic_name__ ) , local_files_only=kwargs.pop("""local_files_only""" , __magic_name__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , __magic_name__ ) , revision=kwargs.pop("""revision""" , __magic_name__ ) , )
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(__magic_name__ , __magic_name__ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
UpperCamelCase = None
else:
with open(__magic_name__ ) as speaker_embeddings_json:
UpperCamelCase = json.load(__magic_name__ )
else:
UpperCamelCase = None
UpperCamelCase = AutoTokenizer.from_pretrained(__magic_name__ , **__magic_name__ )
return cls(tokenizer=__magic_name__ , speaker_embeddings=__magic_name__ )
def lowerCamelCase_ ( self : int , __magic_name__ : Dict , __magic_name__ : Union[str, Any]="speaker_embeddings_path.json" , __magic_name__ : List[Any]="speaker_embeddings" , __magic_name__ : bool = False , **__magic_name__ : Tuple , ):
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(__magic_name__ , __magic_name__ , """v2""" ) , exist_ok=__magic_name__ )
UpperCamelCase = {}
UpperCamelCase = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
UpperCamelCase = self._load_voice_preset(__magic_name__ )
UpperCamelCase = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["""repo_or_path"""] , __magic_name__ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=__magic_name__ , )
UpperCamelCase = os.path.join(__magic_name__ , F'{prompt_key}_{key}.npy' )
UpperCamelCase = tmp_dict
with open(os.path.join(__magic_name__ , __magic_name__ ) , """w""" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
super().save_pretrained(__magic_name__ , __magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : List[str] , __magic_name__ : str = None , **__magic_name__ : str ):
"""simple docstring"""
UpperCamelCase = self.speaker_embeddings[voice_preset]
UpperCamelCase = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
UpperCamelCase = get_file_from_repo(
self.speaker_embeddings.get("""repo_or_path""" , """/""" ) , voice_preset_paths[key] , subfolder=kwargs.pop("""subfolder""" , __magic_name__ ) , cache_dir=kwargs.pop("""cache_dir""" , __magic_name__ ) , force_download=kwargs.pop("""force_download""" , __magic_name__ ) , proxies=kwargs.pop("""proxies""" , __magic_name__ ) , resume_download=kwargs.pop("""resume_download""" , __magic_name__ ) , local_files_only=kwargs.pop("""local_files_only""" , __magic_name__ ) , use_auth_token=kwargs.pop("""use_auth_token""" , __magic_name__ ) , revision=kwargs.pop("""revision""" , __magic_name__ ) , )
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
UpperCamelCase = np.load(__magic_name__ )
return voice_preset_dict
def lowerCamelCase_ ( self : Optional[Any] , __magic_name__ : Optional[dict] = None ):
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : List[str] , __magic_name__ : List[Any]=None , __magic_name__ : Any=None , __magic_name__ : Tuple="pt" , __magic_name__ : Dict=2_5_6 , __magic_name__ : Optional[Any]=False , __magic_name__ : List[Any]=True , __magic_name__ : Optional[Any]=False , **__magic_name__ : Dict , ):
"""simple docstring"""
if voice_preset is not None and not isinstance(__magic_name__ , __magic_name__ ):
if (
isinstance(__magic_name__ , __magic_name__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
UpperCamelCase = self._load_voice_preset(__magic_name__ )
else:
if isinstance(__magic_name__ , __magic_name__ ) and not voice_preset.endswith(""".npz""" ):
UpperCamelCase = voice_preset + """.npz"""
UpperCamelCase = np.load(__magic_name__ )
if voice_preset is not None:
self._validate_voice_preset_dict(__magic_name__ , **__magic_name__ )
UpperCamelCase = BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
UpperCamelCase = self.tokenizer(
__magic_name__ , return_tensors=__magic_name__ , padding="""max_length""" , max_length=__magic_name__ , return_attention_mask=__magic_name__ , return_token_type_ids=__magic_name__ , add_special_tokens=__magic_name__ , **__magic_name__ , )
if voice_preset is not None:
UpperCamelCase = voice_preset
return encoded_text
| 386 |
import math
def is_prime(number: int) -> bool:
    """Primality check used by next_prime below."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the nearest prime to factor * value, scanning down when kwargs["desc"] is True."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
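# Illustrative usage (not part of the original module):
#   next_prime(14)            -> 17   (scans upward from 14)
#   next_prime(14, desc=True) -> 13   (scans downward instead)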
| 386 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_UpperCamelCase = ["""small""", """medium""", """large"""]
_UpperCamelCase = """lm_head.decoder.weight"""
_UpperCamelCase = """lm_head.weight"""
def _lowerCAmelCase( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict ) -> int:
lowerCAmelCase__ = torch.load(__snake_case )
lowerCAmelCase__ = d.pop(__snake_case )
os.makedirs(__snake_case , exist_ok=__snake_case )
torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) )
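# Conversion note (illustrative): DialoGPT checkpoints store the LM head under
# "lm_head.decoder.weight", while the Hugging Face GPT-2 port expects
# "lm_head.weight" -- the function above simply pops and re-inserts that tensor.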
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 712 |
'''simple docstring'''
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
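# Illustrative mappings: "A" -> 1, "Z" -> 26, "AA" -> 27, "AZ" -> 52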
if __name__ == "__main__":
from doctest import testmod
testmod()
| 211 | 0 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element via randomized quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
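# Illustrative behaviour: kth_number([2, 1, 3, 4, 5], 3) == 3. Expected cost is
# linear (randomized quickselect), quadratic in the worst case; duplicates of
# the pivot are dropped, so the routine assumes distinct elements.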
if __name__ == "__main__":
import doctest
doctest.testmod() | 16 |
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif stress < 0:
raise ValueError("Stress cannot be negative" )
elif tangential_force < 0:
raise ValueError("Tangential Force cannot be negative" )
elif area < 0:
raise ValueError("Area cannot be negative" )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
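# Worked example (illustrative): shear_stress(stress=0, tangential_force=100, area=2)
# returns ("stress", 50.0), i.e. tau = F / A.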
if __name__ == "__main__":
import doctest
doctest.testmod()
| 475 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def UpperCAmelCase_ ( __a : str , __a : str , __a : str , __a : PreTrainedTokenizer , __a : int , __a : Optional[int] = None , ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = {}
if train_file is not None:
_lowerCamelCase : List[str] = [train_file]
if eval_file is not None:
_lowerCamelCase : Union[str, Any] = [eval_file]
if test_file is not None:
_lowerCamelCase : Union[str, Any] = [test_file]
_lowerCamelCase : int = datasets.load_dataset('csv' , data_files=__a )
_lowerCamelCase : Optional[Any] = list(ds[list(files.keys() )[0]].features.keys() )
_lowerCamelCase : int = features_name.pop(__a )
_lowerCamelCase : str = list(set(ds[list(files.keys() )[0]][label_name] ) )
_lowerCamelCase : str = {label: i for i, label in enumerate(__a )}
_lowerCamelCase : Dict = tokenizer.model_input_names
_lowerCamelCase : Optional[Any] = {}
if len(__a ) == 1:
for k in files.keys():
_lowerCamelCase : str = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__a , max_length=__a , padding='max_length' ) , batched=__a , )
elif len(__a ) == 2:
for k in files.keys():
_lowerCamelCase : str = ds[k].map(
lambda __a : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__a , max_length=__a , padding='max_length' , ) , batched=__a , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_lowerCamelCase : Tuple = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Dict = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_lowerCamelCase : str = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : Tuple = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_lowerCamelCase : List[Any] = {k: v for k, v in ex.items() if k in input_names}
_lowerCamelCase : List[Any] = labelaid[ex[label_name]]
yield (d, label)
_lowerCamelCase : Optional[Any] = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_lowerCamelCase : List[Any] = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_lowerCamelCase : str = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_lowerCamelCase : List[str] = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_lowerCamelCase : Tuple = (
tf.data.Dataset.from_generator(
__a , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_lowerCamelCase : Union[str, Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
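# Pattern note (illustrative): each split above is exposed through a generator
# yielding (features_dict, label) pairs and wrapped with
# tf.data.Dataset.from_generator; assert_cardinality then restores the dataset
# length that from_generator cannot infer on its own.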
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.info(
f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
f"16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__a , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_lowerCamelCase : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__a ) , labelaid=__a , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_lowerCamelCase : int = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
def compute_metrics(__a : EvalPrediction ) -> Dict:
_lowerCamelCase : str = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_lowerCamelCase : Any = TFTrainer(
model=__a , args=__a , train_dataset=__a , eval_dataset=__a , compute_metrics=__a , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_lowerCamelCase : List[str] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_lowerCamelCase : Dict = trainer.evaluate()
_lowerCamelCase : List[Any] = os.path.join(training_args.output_dir , 'eval_results.txt' )
with open(__a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f" {key} = {value}" )
writer.write(f"{key} = {value}\n" )
results.update(__a )
return results
if __name__ == "__main__":
main()
| 349 |
"""simple docstring"""
def apply_table(inp, table):
    """Apply a permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate the bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 2-bit value in S-box `s` using the outer bits as row, inner bits as column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
| 349 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(a , """size""" ) )
self.assertTrue(hasattr(a , """crop_pct""" ) )
self.assertTrue(hasattr(a , """do_normalize""" ) )
self.assertTrue(hasattr(a , """image_mean""" ) )
self.assertTrue(hasattr(a , """image_std""" ) )
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def A_ ( self ) -> List[str]:
'''simple docstring'''
pass
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase = image_processing(a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase = image_processing(a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
_UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCamelCase = image_processing(a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 612 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 612 | 1 |
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowercase_ = [
# (stable-diffusion, HF Diffusers)
('time_embed.0.weight', 'time_embedding.linear_1.weight'),
('time_embed.0.bias', 'time_embedding.linear_1.bias'),
('time_embed.2.weight', 'time_embedding.linear_2.weight'),
('time_embed.2.bias', 'time_embedding.linear_2.bias'),
('input_blocks.0.0.weight', 'conv_in.weight'),
('input_blocks.0.0.bias', 'conv_in.bias'),
('out.0.weight', 'conv_norm_out.weight'),
('out.0.bias', 'conv_norm_out.bias'),
('out.2.weight', 'conv_out.weight'),
('out.2.bias', 'conv_out.bias'),
]
lowercase_ = [
# (stable-diffusion, HF Diffusers)
('in_layers.0', 'norm1'),
('in_layers.2', 'conv1'),
('out_layers.0', 'norm2'),
('out_layers.3', 'conv2'),
('emb_layers.1', 'time_emb_proj'),
('skip_connection', 'conv_shortcut'),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
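# A few of the (stable-diffusion, HF Diffusers) prefix pairs generated above, for orientation:
#   ("input_blocks.1.0.", "down_blocks.0.resnets.0.")
#   ("input_blocks.3.0.op.", "down_blocks.0.downsamplers.0.conv.")
#   ("middle_block.1.", "mid_block.attentions.0.")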
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: the replacement passes below must run in exactly this order
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
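# Illustrative only (never called by the script): traces one hypothetical HF Diffusers key
# through the two replacement passes above, showing the resulting SD-format name.
def _example_unet_key_conversion():
    key = "down_blocks.0.resnets.0.norm1.weight"
    for sd_part, hf_part in unet_conversion_map_resnet:
        key = key.replace(hf_part, sd_part)
    for sd_part, hf_part in unet_conversion_map_layer:
        key = key.replace(hf_part, sd_part)
    assert key == "input_blocks.1.0.in_layers.0.weight"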
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('nin_shortcut', 'conv_shortcut'),
('norm_out', 'conv_norm_out'),
('mid.attn_1.', 'mid_block.attentions.0.'),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('norm.', 'group_norm.'),
('q.', 'query.'),
('k.', 'key.'),
('v.', 'value.'),
('proj_out.', 'proj_attn.'),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD 1x1-conv weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
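# Illustrative only (never called): SD checkpoints store the VAE mid-block attention
# projections as 1x1 convolutions (4D weights), while HF Diffusers uses nn.Linear (2D),
# which is why convert_vae_state_dict reshapes them above. The tensor size is made up.
def _example_vae_attn_reshape():
    w = torch.randn(512, 512)  # hypothetical projection weight
    assert reshape_weight_for_sd(w).shape == (512, 512, 1, 1)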
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
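# Illustrative only (never called): v2 (OpenCLIP) text encoders keep attention as a single
# fused in_proj tensor, so the q/k/v projections captured above are concatenated along
# dim 0 -- three (d, d) weights become one (3*d, d) in_proj_weight. The sizes are made up.
def _example_qkv_fusion():
    q, k, v = (torch.randn(8, 8) for _ in range(3))
    assert torch.cat([q, k, v]).shape == (24, 8)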
def convert_text_enc_state_dict(text_enc_dict):
    # v1 (CLIP) text encoders need no key renaming
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 215 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
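# Typical invocation, via the accelerate CLI entry point that routes here:
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml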
if __name__ == "__main__":
main()
| 215 | 1 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        pass
| 10 |
'''Tests for the graph-to-ONNX conversion utilities in transformers.convert_graph_to_onnx.'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 436 | 0 |
'''Run matching diffusers UNet checkpoints and compare their logits against stored reference values.'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
api = HfApi()

results = {}
# fmt: off
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
lowerCAmelCase__ : int = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
lowerCAmelCase__ : str = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
lowerCAmelCase__ : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
lowerCAmelCase__ : List[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
lowerCAmelCase__ : int = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
lowerCAmelCase__ : Union[str, Any] = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
lowerCAmelCase__ : Optional[int] = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
lowerCAmelCase__ : int = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
lowerCAmelCase__ : List[str] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
lowerCAmelCase__ : Any = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
lowerCAmelCase__ : str = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
lowerCAmelCase__ : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
lowerCAmelCase__ : str = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 502 |
'''Fractional knapsack solved greedily by value-to-weight ratio.'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Return the maximum value achievable with capacity w over n divisible items."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
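# Example: values [60, 100, 120], weights [10, 20, 30], capacity 50 -- the greedy solution
# takes the first two items whole and 2/3 of the third:
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0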
if __name__ == "__main__":
import doctest
doctest.testmod()
| 502 | 1 |
def solution(limit: int = 50000000) -> int:
    """Project Euler 87: count numbers below `limit` expressible as
    a prime square plus a prime cube plus a prime fourth power."""
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
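# From the problem statement: 28 = 2**2 + 2**3 + 2**4 is the smallest such number,
# and 33 = 3**2 + 2**3 + 2**4, 47 = 2**2 + 3**3 + 2**4, 49 = 5**2 + 2**3 + 2**4 follow.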
if __name__ == "__main__":
print(F"{solution() = }")
| 89 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 29 | 0 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols('ct x y z')
def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event=None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
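# Worked example: at v = 0.5 * c, beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) ~= 1.1547,
# so an event (ct, x, 0, 0) boosts to (gamma * (ct - 0.5 * x), gamma * (x - 0.5 * ct), 0, 0).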
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('''Example of four vector: ''')
print(f'ct\' = {four_vector[0]}')
print(f'x\' = {four_vector[1]}')
print(f'y\' = {four_vector[2]}')
print(f'z\' = {four_vector[3]}')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'\n{numerical_vector}')
| 463 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
    token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def final():
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]

    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]

    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
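# Example invocation (the script filename and paths are illustrative):
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-13-384-22k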
| 463 | 1 |
'''RePaint: inpainting with denoising diffusion models (https://arxiv.org/abs/2201.09865).'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
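# Minimal usage sketch (the checkpoint name is illustrative):
#
#   from diffusers import RePaintPipeline
#
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#   result = pipe(image=original, mask_image=mask, num_inference_steps=250).images[0]
#
# Per _preprocess_mask above, mask values >= 0.5 become 1 (pixels kept from the original)
# and values < 0.5 become 0 (pixels to be inpainted).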
| 342 |
'''Convert a FLAVA checkpoint from facebookresearch/multimodal to the transformers format.'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_lowercase = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
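# Usage sketch — the paths below are illustrative placeholders, not real files:
#   convert_flava_checkpoint(
#       checkpoint_path="./flava_full.pt",
#       codebook_path="./flava_codebook.pt",
#       pytorch_dump_folder_path="./flava-hf",
#   )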
| 342 | 1 |
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Project Euler problem 1: sum all multiples of 3 or 5 below `limit`."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 721 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : str = ShapEPipeline
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Union[str, Any] = ["""prompt"""]
__lowerCAmelCase : Tuple = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
__lowerCAmelCase : Optional[Any] = False
@property
def __lowerCamelCase ( self :Dict ):
return 3_2
@property
def __lowerCamelCase ( self :str ):
return 3_2
@property
def __lowerCamelCase ( self :Optional[int] ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self :int ):
return 8
@property
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCamelCase ( self :int ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(__lowercase )
@property
def __lowerCamelCase ( self :str ):
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 1_6,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 3_2,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
snake_case__ : Optional[int] = PriorTransformer(**__lowercase )
return model
@property
def __lowerCamelCase ( self :Optional[int] ):
torch.manual_seed(0 )
snake_case__ : Dict = {
'''param_shapes''': (
(self.renderer_dim, 9_3),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 1_2,
'''background''': (
0.1,
0.1,
0.1,
),
}
snake_case__ : List[str] = ShapERenderer(**__lowercase )
return model
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : str = self.dummy_prior
snake_case__ : Optional[Any] = self.dummy_text_encoder
snake_case__ : List[Any] = self.dummy_tokenizer
snake_case__ : Optional[int] = self.dummy_renderer
snake_case__ : str = HeunDiscreteScheduler(
beta_schedule='''exp''' ,num_train_timesteps=1_0_2_4 ,prediction_type='''sample''' ,use_karras_sigmas=__lowercase ,clip_sample=__lowercase ,clip_sample_range=1.0 ,)
snake_case__ : Tuple = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :int ,__lowercase :str=0 ):
if str(__lowercase ).startswith('''mps''' ):
snake_case__ : List[str] = torch.manual_seed(__lowercase )
else:
snake_case__ : Optional[Any] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
snake_case__ : Optional[int] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 3_2,
'''output_type''': '''np''',
}
return inputs
def __lowerCamelCase ( self :Tuple ):
snake_case__ : str = '''cpu'''
snake_case__ : str = self.get_dummy_components()
snake_case__ : Optional[Any] = self.pipeline_class(**__lowercase )
snake_case__ : Tuple = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(__lowercase ) )
snake_case__ : Dict = output.images[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (2_0, 3_2, 3_2, 3)
snake_case__ : Tuple = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCamelCase ( self :Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Optional[int] = torch_device == '''cpu'''
snake_case__ : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=__lowercase ,relax_max_difference=__lowercase ,)
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : List[Any] = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**__lowercase )
snake_case__ : str = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Any = 1
snake_case__ : str = 2
snake_case__ : Any = self.get_dummy_inputs(__lowercase )
for key in inputs.keys():
if key in self.batch_params:
snake_case__ : Optional[Any] = batch_size * [inputs[key]]
snake_case__ : Any = pipe(**__lowercase ,num_images_per_prompt=__lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
snake_case__ : Optional[int] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
snake_case__ : Any = pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
snake_case__ : Optional[int] = torch.Generator(device=__lowercase ).manual_seed(0 )
snake_case__ : Tuple = pipe(
'''a shark''' ,generator=__lowercase ,guidance_scale=15.0 ,num_inference_steps=6_4 ,frame_size=6_4 ,output_type='''np''' ,).images[0]
assert images.shape == (2_0, 6_4, 6_4, 3)
assert_mean_pixel_difference(__lowercase ,__lowercase )
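# Usage sketch mirroring the slow test above (a GPU and a weights download assumed):
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#   images = pipe(
#       "a shark", guidance_scale=15.0, num_inference_steps=64,
#       frame_size=64, output_type="np",
#   ).images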
| 219 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class __A :
def __init__(self : Dict , __a : int , __a : List[str]=3 , __a : Dict=7 , __a : List[str]=True , __a : List[Any]=True , __a : Optional[Any]=False , __a : List[str]=True , __a : Dict=99 , __a : Optional[Any]=32 , __a : Union[str, Any]=5 , __a : List[Any]=4 , __a : Optional[Any]=37 , __a : Optional[Any]="gelu" , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : Optional[Any]=512 , __a : Optional[Any]=16 , __a : int=2 , __a : Dict=0.02 , __a : str=3 , __a : Optional[int]=4 , __a : Tuple=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
def _lowercase (self : int ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase (self : str ):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=_A , )
def _lowercase (self : Any , __a : Any , __a : List[Any] , __a : Dict , __a : Optional[int] , __a : Optional[int] , __a : Optional[int] , __a : List[Any] ):
UpperCAmelCase_ = FalconModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , attention_mask=_A )
UpperCAmelCase_ = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : Dict , __a : Dict , __a : str , __a : Dict , __a : List[Any] , __a : Optional[int] , __a : str , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[int] , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , )
UpperCAmelCase_ = model(
_A , attention_mask=_A , encoder_hidden_states=_A , )
UpperCAmelCase_ = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase (self : str , __a : Dict , __a : Union[str, Any] , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : str , __a : Dict , ):
UpperCAmelCase_ = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase (self : str , __a : List[str] , __a : Union[str, Any] , __a : str , __a : int , __a : Union[str, Any] , __a : str , __a : Tuple , __a : Dict , __a : Optional[int] , ):
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = FalconForCausalLM(config=_A )
model.to(_A )
model.eval()
# first forward pass
UpperCAmelCase_ = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )["hidden_states"][0]
UpperCAmelCase_ = model(
_A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_A , _A , atol=1E-3 ) )
def _lowercase (self : Any ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __A ( a_ , a_ , a_ , unittest.TestCase ):
a__ : Union[str, Any] = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
a__ : Tuple = (FalconForCausalLM,) if is_torch_available() else ()
a__ : Tuple = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Any = False
a__ : Any = False
def _lowercase (self : List[str] ):
UpperCAmelCase_ = FalconModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_A , hidden_size=37 )
def _lowercase (self : List[Any] ):
self.config_tester.run_common_tests()
def _lowercase (self : List[str] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase (self : List[str] ):
UpperCAmelCase_ , *UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
UpperCAmelCase_ = alibi
self.model_tester.create_and_check_model(_A , *_A )
def _lowercase (self : List[Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(_A )
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : str ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = "single_label_classification"
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(_A )
UpperCAmelCase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = FalconForCausalLM(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , use_cache=_A )
UpperCAmelCase_ = input_ids.shape[0]
UpperCAmelCase_ = model._convert_to_rw_cache(result.past_key_values )
UpperCAmelCase_ = model._convert_cache_to_standard_format(_A , _A )
for layer in range(len(_A ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = 3
UpperCAmelCase_ = "multi_label_classification"
UpperCAmelCase_ = input_dict["input_ids"]
UpperCAmelCase_ = input_ids.ne(1 ).to(_A )
UpperCAmelCase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ = FalconForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase_ = model(_A , attention_mask=_A , labels=_A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowercase (self : Tuple ):
for model_class in self.all_generative_model_classes:
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(_A , "use_cache" ):
return
UpperCAmelCase_ = model_class(_A ).to(_A )
if "use_cache" not in inputs:
UpperCAmelCase_ = True
UpperCAmelCase_ = model(**_A )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
UpperCAmelCase_ = (
getattr(_A , "decoder_layers" , _A )
or getattr(_A , "num_decoder_layers" , _A )
or config.num_hidden_layers
)
UpperCAmelCase_ = getattr(_A , "num_kv_heads" , config.num_attention_heads )
UpperCAmelCase_ = getattr(_A , "d_model" , config.hidden_size )
UpperCAmelCase_ = embed_dim // num_attention_heads
UpperCAmelCase_ = outputs["past_key_values"]
self.assertEqual(len(_A ) , _A )
UpperCAmelCase_ , UpperCAmelCase_ = inputs["input_ids"].shape
for i in range(_A ):
if config.new_decoder_architecture:
UpperCAmelCase_ = config.num_attention_heads
elif config.multi_query:
UpperCAmelCase_ = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class __A ( unittest.TestCase ):
@slow
def _lowercase (self : Tuple ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b" )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b" )
model.eval()
model.to(_A )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_A )
UpperCAmelCase_ = (
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
UpperCAmelCase_ = model.generate(**_A , do_sample=_A , max_new_tokens=19 )
UpperCAmelCase_ = tokenizer.batch_decode(_A )[0]
self.assertEqual(_A , _A )
@slow
def _lowercase (self : Tuple ):
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_A )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(_A )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_A )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**_A , do_sample=_A , max_new_tokens=4 )
model.generate(**_A , do_sample=_A , max_new_tokens=4 )
model.generate(**_A , num_beams=2 , max_new_tokens=4 )
@slow
def _lowercase (self : Optional[int] ):
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
UpperCAmelCase_ = AutoTokenizer.from_pretrained(_A )
UpperCAmelCase_ = FalconForCausalLM.from_pretrained(_A )
model.eval()
model.to(device=_A )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(_A )
# Test results are the same with and without cache
UpperCAmelCase_ = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A )
UpperCAmelCase_ = model.generate(**_A , do_sample=_A , max_new_tokens=20 , use_cache=_A )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
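# Usage sketch distilled from the first slow test above (weights download assumed):
#   tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
#   model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b").eval()
#   inputs = tokenizer("My favorite food is", return_tensors="pt")
#   output = model.generate(**inputs, do_sample=False, max_new_tokens=19)
#   print(tokenizer.batch_decode(output)[0])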
| 78 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (a_ , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaImgaImgPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''image''']
UpperCAmelCase__ = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return 3_2
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def _lowercase ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowercase ( self ):
'''simple docstring'''
return 1_0_0
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase = UNetaDConditionModel(**_A )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.dummy_unet
UpperCAmelCase = self.dummy_movq
UpperCAmelCase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_00_85,
'''beta_end''': 0.0_12,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
UpperCAmelCase = DDIMScheduler(**_A )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase ( self , _A , _A=0 ):
'''simple docstring'''
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_A )
# create init_image
UpperCAmelCase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(_A )
else:
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''cpu'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = pipe(**self.get_dummy_inputs(_A ) )
UpperCAmelCase = output.images
UpperCAmelCase = pipe(
**self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase = np.array(
[0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase = '''A red cartoon frog, 4k'''
UpperCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_A )
UpperCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.floataa )
UpperCAmelCase = pipeline.to(_A )
pipeline.set_progress_bar_config(disable=_A )
UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase , UpperCAmelCase = pipe_prior(
_A , generator=_A , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCAmelCase = pipeline(
image=_A , image_embeds=_A , negative_image_embeds=_A , generator=_A , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
UpperCAmelCase = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_A , _A )
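# Usage sketch following the slow test above (a GPU, fp16 weights, and a PIL
# `init_image` you supply are assumed; class names follow this file's imports):
#   pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   pipe = KandinskyVaaImgaImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#   ).to("cuda")
#   image_embeds, negative_embeds = pipe_prior("A red cartoon frog, 4k").to_tuple()
#   frog = pipe(image=init_image, image_embeds=image_embeds,
#               negative_image_embeds=negative_embeds, strength=0.2).images[0]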
| 130 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
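# Illustrative output only (real titles and links depend on the live Hacker News feed):
#   * [Example story title](https://example.com/article)
#   * [Another front-page story](https://example.org/post)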
| 555 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
UpperCAmelCase = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class GPTaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            # rebuild the backend pre-tokenizer so its add_prefix_space flag matches the argument
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds DialoGPT-style inputs: each conversation turn followed by the EOS token."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
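# Usage sketch (downloading the public "gpt2" tokenizer files is assumed):
#   tokenizer = GPTaTokenizerFast.from_pretrained("gpt2")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert tokenizer.decode(ids) == "Hello world"  # byte-level BPE round-trips exactly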
| 555 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE_: Optional[Any] =get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( UpperCamelCase__ , unittest.TestCase ):
a__ : Union[str, Any] = DebertaVaTokenizer
a__ : Any = DebertaVaTokenizerFast
a__ : Union[str, Any] = True
a__ : Tuple = True
def _lowercase (self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = DebertaVaTokenizer(__a , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase (self : int , __a : Optional[Any] ):
UpperCAmelCase_ = "this is a test"
UpperCAmelCase_ = "this is a test"
return input_text, output_text
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "<pad>"
UpperCAmelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(__a ) , 30001 )
def _lowercase (self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 30000 )
def _lowercase (self : Tuple ):
# fmt: off
UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowercase (self : str ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def _lowercase (self : Any ):
pass
def _lowercase (self : List[Any] ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Dict ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Any ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Optional[Any] ):
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : int ):
# fmt: off
UpperCAmelCase_ = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
def _lowercase (self : Optional[int] ):
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a ) )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a ) )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(__a )
UpperCAmelCase_ = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Tuple ):
UpperCAmelCase_ = "This is a test"
UpperCAmelCase_ = [13, 1, 4398, 25, 21, 1289]
UpperCAmelCase_ = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ = DebertaVaTokenizer(__a , keep_accents=__a )
UpperCAmelCase_ = DebertaVaTokenizerFast(__a , keep_accents=__a )
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
# fmt: off
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
UpperCAmelCase_ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ = tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
UpperCAmelCase_ = rust_tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(__a , __a )
def _lowercase (self : Union[str, Any] ):
UpperCAmelCase_ = DebertaVaTokenizer(__a )
UpperCAmelCase_ = tokenizer.encode("sequence builders" )
UpperCAmelCase_ = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(__a , __a )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , )
@slow
def _lowercase (self : Union[str, Any] ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 78 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
snake_case__ : Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class _A ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_snake_case : Dict = XLMProphetNetTokenizer
_snake_case : List[str] = False
_snake_case : int = True
def _snake_case ( self : str ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = XLMProphetNetTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = "[PAD]"
__lowercase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "[PAD]" )
self.assertEqual(vocab_keys[1] , "[CLS]" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase ) , 1_012 )
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_012 )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = XLMProphetNetTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
__lowercase = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__lowercase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__lowercase = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
@cached_property
def _snake_case ( self : Any ):
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )
@slow
def _snake_case ( self : str ):
'''simple docstring'''
__lowercase = "Hello World!"
__lowercase = [35_389, 6_672, 49, 2]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowercase = {"input_ids": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
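# Usage sketch mirroring the slow test above (weights download assumed):
#   tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   assert tokenizer.encode("Hello World!") == [35_389, 6_672, 49, 2]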
| 402 | 0 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowercase_ = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor with values in {-1, +1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, +1}; returns an image tensor in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
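# Round-trip sketch for the two helpers above (CPU tensor assumed):
#   x = torch.rand(1, 3, 8, 8)      # image in [0, 1]
#   b = decimal_to_bits(x)          # shape (1, 3 * BITS, 8, 8), values in {-1, +1}
#   x_hat = bits_to_decimal(b)      # back to [0, 1], quantized to 1/255 steps
#   assert torch.allclose(x_hat, (x * 255).int().float() / 255)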
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips the predicted x_0 to +/- bit_scale instead of [-1, 1]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
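# Summarizing the update implemented above (DDIM eqs. (12) and (16)):
#   x_{t-1} = sqrt(alpha_{t-1}) * x0_pred
#           + sqrt(1 - alpha_{t-1} - sigma_t^2) * eps_theta(x_t, t)
#           + sigma_t * noise
# with sigma_t = eta * sqrt((1 - alpha_{t-1}) / (1 - alpha_t)) * sqrt(1 - alpha_t / alpha_{t-1}).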
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips the predicted x_0 to +/- bit_scale instead of [-1, 1]."""
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet,
        scheduler,
        bit_scale=1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # swap in the bit-diffusion variant of ``step``; ``__get__`` binds the plain
        # function as a method on the scheduler instance so ``self`` is passed correctly
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height=256,
        width=256,
        num_inference_steps=50,
        generator=None,
        batch_size=1,
        output_type='pil',
        return_dict=True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == 'pil':
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
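# Example usage of the pipeline above. This is a minimal sketch, assuming a
# trained bit-diffusion UNet checkpoint and the ``decimal_to_bits``/``bits_to_decimal``
# helpers defined earlier in this file; the checkpoint path is hypothetical.
#
#   from diffusers import DDIMScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained('path/to/bit-diffusion-unet')  # hypothetical path
#   scheduler = DDIMScheduler()
#   pipe = BitDiffusion(unet, scheduler, bit_scale=1.0)
#   images = pipe(height=64, width=64, num_inference_steps=50).images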
| 707 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {'height': 20, 'width': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image = Image.open(dataset[0]['file'])
    map = Image.open(dataset[1]['file'])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')

    image1 = Image.open(ds[0]['file'])
    map1 = Image.open(ds[1]['file'])
    image2 = Image.open(ds[2]['file'])
    map2 = Image.open(ds[3]['file'])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'center_crop'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 20, 'width': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                1,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors='pt')
        self.assertEqual(
            encoding['pixel_values'].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(
            encoding['labels'].shape,
            (
                2,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        self.assertEqual(encoding['labels'].dtype, torch.long)
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors='pt')
        self.assertTrue(encoding['labels'].min().item() >= 0)
        self.assertTrue(encoding['labels'].max().item() <= 255)
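# A sketch of the intended ``do_reduce_labels`` semantics for ADE20k-style maps
# (an illustration, not the library's exact code path): background label 0 is
# mapped to the ignore index 255 and every other class id shifts down by one.
#
#   import numpy as np
#   labels = np.array([0, 1, 2, 150])
#   np.where(labels == 0, 255, labels - 1)  # -> array([255, 0, 1, 149])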
| 380 | 0 |
"""simple docstring"""
a_ = """Input must be a string of 8 numbers plus letter"""
a_ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def __lowercase ( snake_case_ : str ) ->bool:
'''simple docstring'''
if not isinstance(snake_case_ ,snake_case_ ):
__A : str = F"""Expected string as input, found {type(snake_case_ ).__name__}"""
raise TypeError(snake_case_ )
__A : Tuple = spanish_id.replace('''-''' ,'''''' ).upper()
if len(snake_case_ ) != 9:
raise ValueError(snake_case_ )
try:
__A : Optional[Any] = int(spanish_id_clean[0:8] )
__A : Optional[Any] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case_ ) from ex
if letter.isdigit():
raise ValueError(snake_case_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
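# Example: '12345678Z' is a valid DNI, since 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == 'Z'; the same digits with any other letter fail.
#
#   assert is_spain_national_id('12345678Z')
#   assert not is_spain_national_id('12345678A')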
| 177 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = 'poolformer'

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act='gelu',
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
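# A minimal sketch of using the config above; the values shown are the
# defaults, so this is equivalent to ``PoolFormerConfig()``.
#
#   config = PoolFormerConfig(hidden_sizes=[64, 128, 320, 512], depths=[2, 2, 6, 2])
#   print(config.model_type)  # 'poolformer'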
| 177 | 1 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, Dict[str, Any]]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: str, push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
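# A minimal sketch of the mixin from the user's side; concrete subclasses such
# as DDIMScheduler inherit ``from_pretrained``/``save_pretrained``. The repo id
# is only illustrative.
#
#   from diffusers import DDIMScheduler
#
#   scheduler = DDIMScheduler.from_pretrained('runwayml/stable-diffusion-v1-5', subfolder='scheduler')
#   print(scheduler.compatibles)  # other scheduler classes that can load this config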
| 352 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
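# A minimal usage sketch of the image processor exercised above; the checkpoint
# id is the standard MobileNetV1 one on the Hub.
#
#   from PIL import Image
#   from transformers import MobileNetV1ImageProcessor
#
#   processor = MobileNetV1ImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224')
#   inputs = processor(images=Image.new('RGB', (224, 224)), return_tensors='pt')
#   print(inputs['pixel_values'].shape)  # torch.Size([1, 3, 224, 224])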
| 352 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt'
        )

        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])

        outputs = text_classifier('This is great !', top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]
        )

        outputs = text_classifier(['This is great !', 'This is bad'], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ],
        )

        outputs = text_classifier('This is great !', top_k=1)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])

        # Legacy behavior
        outputs = text_classifier('This is great !', return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])

        outputs = text_classifier('This is great !', return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]]
        )

        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ],
        )

        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {'label': 'LABEL_0', 'score': 0.504},
                {'label': 'LABEL_0', 'score': 0.504},
            ],
        )
    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task='text-classification',
            model='hf-internal-testing/tiny-random-distilbert',
            framework='pt',
            device=torch.device('cpu'),
        )

        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='tf'
        )

        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline('text-classification')

        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline('text-classification', framework='tf')

        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ['HuggingFace is in', 'This is another test']

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{'label': ANY(str), 'score': ANY(float)}])
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())

        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{'label': ANY(str), 'score': ANY(float)}, {'label': ANY(str), 'score': ANY(float)}],
        )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{'label': ANY(str), 'score': ANY(float)}] * N, [{'label': ANY(str), 'score': ANY(float)}] * N],
        )

        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {'label': ANY(str), 'score': ANY(float)},
        )
        self.assertTrue(outputs['label'] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]])
        self.assertEqual(
            nested_simplify(outputs),
            [{'label': ANY(str), 'score': ANY(float)}],
        )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
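# A quick sketch of the two output formats exercised above, using the same tiny
# checkpoint; scores shown are approximate.
#
#   from transformers import pipeline
#
#   classifier = pipeline('text-classification', model='hf-internal-testing/tiny-random-distilbert')
#   classifier('This is great !')              # [{'label': 'LABEL_0', 'score': ~0.504}]
#   classifier('This is great !', top_k=None)  # one dict per label, sorted by score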
| 105 |
"""simple docstring"""
from collections import deque
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
a__ = process_name # process name
a__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
a__ = arrival_time
a__ = burst_time # remaining burst time
a__ = 0 # total time of the process wait in ready queue
a__ = 0 # time from arrival time to completion time
class lowercase:
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
a__ = number_of_queues
# time slice of queues that round robin algorithm applied
a__ = time_slices
# unfinished process is in this ready_queue
a__ = queue
# current time
a__ = current_time
# finished process is in this sequence queue
a__ = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue

        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one run the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={'queue': deque([P1, P2, P3, P4])})

    P1 = Process('P1', 0, 53)
    P2 = Process('P2', 0, 17)
    P3 = Process('P3', 0, 68)
    P4 = Process('P4', 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"
    )
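# For the configuration above (bursts 53/17/68/24, time slices 17 and 25) the
# finish order works out as follows; this was traced by hand from the algorithm,
# so treat it as an illustration rather than a doctest:
#
#   mlfq.calculate_sequence_of_finish_queue()  # ['P2', 'P4', 'P1', 'P3']
#
# P2 finishes inside the first round-robin queue, P4 inside the second, and the
# long jobs P1 and P3 drain in the final first-come-first-served queue.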
| 273 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 712 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size='auto'):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.'
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding='max_length',
                max_length=max_length,
                truncation=True,
                return_tensors='pt',
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
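# A minimal usage sketch of the pipeline above; the model ids are illustrative,
# and any Whisper + Stable Diffusion checkpoints with these components would do.
#
#   from datasets import load_dataset
#   from diffusers import DiffusionPipeline
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#
#   ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
#   audio_sample = ds[0]['audio']
#   pipe = DiffusionPipeline.from_pretrained(
#       'runwayml/stable-diffusion-v1-5',
#       custom_pipeline='speech_to_image_diffusion',
#       speech_model=WhisperForConditionalGeneration.from_pretrained('openai/whisper-small'),
#       speech_processor=WhisperProcessor.from_pretrained('openai/whisper-small'),
#   )
#   image = pipe(audio_sample['array'], sampling_rate=audio_sample['sampling_rate']).images[0]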
| 646 | 0 |
import numpy as np
SQUARE = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        message = message.replace('j', 'i')

        # write each letter's (row, column) coordinates into a 2 x n grid
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # read the grid row by row and regroup the digits into coordinate pairs
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(' ', '')
        # flatten each ciphertext letter's coordinates, then undo the row-wise regrouping
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ''
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
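# Example usage of the cipher above; 'j' is folded into 'i' by the 5x5 square
# and spaces are stripped, so decode(encode(m)) recovers m up to that folding.
#
#   cipher = BifidCipher()
#   ciphertext = cipher.encode('testmessage')
#   assert cipher.decode(ciphertext) == 'testmessage'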
| 478 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'maskformer-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
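# A minimal sketch of instantiating the backbone config above; the values shown
# are the Swin-Tiny defaults, so this matches MaskFormerSwinConfig().
#
#   config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
#   print(config.hidden_size)   # 768 == 96 * 2 ** 3
#   print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']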
| 478 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
    'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_fast'] = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_bert'] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_bert'] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_bert_tf'] = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_bert'] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
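# How the structure above behaves at import time: importing the package is cheap
# because only names are registered; the heavy submodule import happens on first
# attribute access. A generic sketch of the pattern (not transformers' own
# _LazyModule implementation):
#
#   import importlib, types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#       def __getattr__(self, attr):
#           module = importlib.import_module(f'{self.__name__}.{self._attr_to_module[attr]}')
#           return getattr(module, attr)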
| 721 |
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
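# --- usage sketch (added for illustration) ---
# Quick check of both helpers on a valid progression and on one that breaks
# the common difference.
if __name__ == "__main__":
    print(is_arithmetic_series([2, 4, 6, 8]))  # True: common difference is 2
    print(is_arithmetic_series([2, 4, 7]))  # False: 4 -> 7 breaks the pattern
    print(arithmetic_mean([2, 4, 6]))  # 4.0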
| 585 | 0 |
'''Compute the circular convolution of two discrete-time signals.'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Computes the circular convolution of two discrete-time signals using a
    circulant-matrix formulation.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # row i of the circulant matrix is the second signal rotated right by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
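# --- usage sketch (added for illustration) ---
# Circularly convolving the default signals [2, 1, 2, -1] and [1, 2, 3, 4]
# should yield [10.0, 10.0, 6.0, 14.0].
if __name__ == "__main__":
    print(CircularConvolution().circular_convolution())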
| 98 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        ) | 39 | 0 |
from math import pow, sqrt

# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The operand order below is inferred from that relation, since the original
# collapsed both operands into one name.


def validate(*values: float) -> bool:
    """At least one value must be given and every value must be positive."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0."
        )
    )
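# --- worked example (added for illustration) ---
# For hydrogen (~2.016 g/mol) versus oxygen (~32.0 g/mol) the effusion-rate
# ratio is sqrt(32.0 / 2.016) ~= 3.98, i.e. H2 effuses roughly four times
# faster than O2.
if __name__ == "__main__":
    print(effusion_ratio(2.016, 32.0))  # ~3.98
    print(effusion_ratio(-1.0, 32.0))  # a ValueError instance: inputs must be positive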
| 700 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
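# --- usage sketch (added for illustration; uses only the public diffusers API) ---
# Outside the test harness the scheduler is driven the same way full_loop()
# above does it: configure, set the step count, then step per timestep.
if __name__ == "__main__":
    demo_scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    demo_scheduler.set_timesteps(50)
    print(len(demo_scheduler.timesteps))  # PRK warm-up steps plus PLMS steps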
| 196 | 0 |
'''Check whether three 3-D points are collinear using the cross product of AB and AC.'''

Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Creates the vector that points from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Returns the cross product of the two vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True when every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear when the cross product of AB and AC is zero."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
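# --- usage sketch (added for illustration) ---
# Points on the line x = y = z are collinear; perturbing one coordinate breaks it.
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3)))  # False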
| 435 |
'''Testing suite for the PyTorch ViTMAE model.'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
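# --- usage sketch (added for illustration; mirrors the integration test above) ---
# Masked-autoencoder inference outside the test harness follows the same steps:
# preprocess an image, run the model, read the per-patch reconstruction logits.
if __name__ == "__main__":
    demo_processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")
    demo_model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
    demo_inputs = demo_processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        demo_outputs = demo_model(**demo_inputs)
    print(demo_outputs.logits.shape)  # (1, num_patches, patch_size**2 * num_channels)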
| 111 | 0 |
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Returns True when no already-placed queen attacks square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking: place one queen per row, then recurse on the next row."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 700 |
from collections.abc import Iterable
from typing import Any
class Node:
    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        """Return a string of all the Nodes using in order traversal"""
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)

    def search(self, value):
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """We go deep on the right branch"""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """We go deep on the left branch"""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        """
        This function traverses the tree. You can pass a function to traverse
        the tree as needed by client code.
        """
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the values of the nodes to a list using an in-order traversal"""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def kth_smallest(self, k, node):
        """Return the kth smallest element in a binary search tree"""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node):
    """
    postOrder (left, right, self)
    """
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree_example():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
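# --- usage sketch (added for illustration) ---
# kth_smallest() relies on the in-order traversal, so it returns values by
# ascending rank; postorder() visits children before their parent.
if __name__ == "__main__":
    demo_tree = BinarySearchTree()
    demo_tree.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
    print(demo_tree.kth_smallest(3, demo_tree.root))  # 4, the third-smallest value
    print([node.value for node in postorder(demo_tree.root)])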
| 279 | 0 |
'''Fine-tune seq2seq models (summarization / translation) with PyTorch Lightning.'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        """A debugging utility"""
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")

        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, TaForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)

        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
_UpperCamelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCamelCase = losses['''loss''']
_UpperCamelCase = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
_UpperCamelCase = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCamelCase = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
_UpperCamelCase = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
_UpperCamelCase = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
_UpperCamelCase = flatten_list([x['''preds'''] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids)
        target = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer,
            type_path=type_path,
            n_obs=n_obs,
            max_target_length=max_target_length,
            **self.dataset_kwargs,
        )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=False,
                num_workers=self.num_workers,
                sampler=sampler,
            )

        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
            )
            return DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                collate_fn=dataset.collate_fn,
                num_workers=self.num_workers,
            )
        else:
            return DataLoader(
                dataset,
                batch_size=batch_size,
                collate_fn=dataset.collate_fn,
                shuffle=shuffle,
                num_workers=self.num_workers,
                sampler=None,
            )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length",
            default=1024,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--max_target_length",
            default=56,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--val_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--test_max_target_length",
            default=142,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
        )
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
        )
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience",
            type=int,
            default=-1,
            required=False,
            help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ),
        )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)

    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
lowercase__ : Any = pl.Trainer.add_argparse_args(parser)
lowercase__ : int = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowercase__ : int = parser.parse_args()
main(args)
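# --- invocation sketch (added for illustration; paths and sizes are placeholders) ---
# A typical run combines the generic lightning args with the flags registered in
# add_model_specific_args() above, e.g.:
#
#   python finetune.py \
#     --model_name_or_path t5-small \
#     --data_dir ./cnn_dm \
#     --output_dir ./outputs \
#     --do_train \
#     --n_val 500 --val_metric rouge2 --eval_beams 2
#
# --model_name_or_path, --data_dir, --output_dir and --do_train are assumed to
# come from the shared lightning_base argument helpers imported above.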
| 98 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wavaveca ( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif name.split("." )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb ( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict ( dict_path ):
    with open(dict_path , "r" , encoding="utf-8" ) as f:
        lines = f.readlines()
    words = [line.split(" " )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
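# Example of the fairseq dictionary format consumed by create_vocab_dict (a sketch;
# the file name and tokens are hypothetical). Each line is "<token> <count>" and the
# first four ids are reserved for the special tokens:
#
#   dict.txt:
#       hello 120
#       world 95
#
#   create_vocab_dict("dict.txt")
#   # -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}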
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("embed_out" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , "vocab.json" ) , "w" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , "vocab.json" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
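# Example invocation (a sketch; the script file name and paths are placeholders,
# the flags are exactly the ones defined by the argparse block above):
#
#   python convert_wav2vec2_seq2seq.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./speech-encoder-decoder \
#       --vocab_size 10224 \
#       --num_decoder_layers 7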
| 391 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class IBertConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = "ibert"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , quant_mode=False , force_dequant="none" , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
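# Minimal usage sketch (comment-only example; values other than the defaults above
# are illustrative). quant_mode=True switches the configuration to integer-only mode:
#
#   config = IBertConfig(vocab_size=30522, quant_mode=True)
#   assert config.model_type == "ibert"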
class IBertOnnxConfig ( OnnxConfig ):
    """simple docstring"""
    @property
    def inputs ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 705 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool ( PipelineTool ):
    """simple docstring"""
    default_checkpoint = '''philschmid/bart-large-cnn-samsum'''
    description = (
        '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '''
        '''and returns a summary of the text.'''
    )
    name = '''summarizer'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    inputs = ['''text''']
    outputs = ['''text''']
    def encode ( self , text ):
        return self.pre_processor(text , return_tensors="""pt""" , truncation=True )
    def forward ( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode ( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True ) | 128 | 0 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__A = "bert-base-cased"
__A = "google/pegasus-xsum"
__A = [" Sam ate lunch today.", "Sams lunch ingredients."]
__A = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
__A = "patrickvonplaten/t5-tiny-random"
__A = "sshleifer/bart-tiny-random"
__A = "sshleifer/tiny-mbart"
__A = "sshleifer/tiny-marian-en-de"
def _dump_articles ( path: Path , articles: list ) -> None:
    """simple docstring"""
    content = "\n".join(articles )
    Path(path ).open("w" ).writelines(content )
def make_test_data_dir ( tmp_dir ) -> str:
    """simple docstring"""
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir , f'{split}.source' ) , ARTICLES )
        _dump_articles(os.path.join(tmp_dir , f'{split}.target' ) , SUMMARIES )
    return tmp_dir
class _A ( UpperCamelCase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[str]:
__UpperCAmelCase =AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__UpperCAmelCase =max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
__UpperCAmelCase =max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
__UpperCAmelCase =4
__UpperCAmelCase =8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__UpperCAmelCase , __UpperCAmelCase ="""ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
__UpperCAmelCase =SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =DataLoader(__SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__UpperCAmelCase =shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _a ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
__UpperCAmelCase =AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__UpperCAmelCase =max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in ARTICLES )
__UpperCAmelCase =max(len(tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) for a in SUMMARIES )
__UpperCAmelCase =4
__UpperCAmelCase =LegacySeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=20 , max_target_length=__SCREAMING_SNAKE_CASE , )
__UpperCAmelCase =DataLoader(__SCREAMING_SNAKE_CASE , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _a ( self : int ) -> str:
__UpperCAmelCase =AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
__UpperCAmelCase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__UpperCAmelCase =tmp_dir.joinpath("""train.source""" ).open().readlines()
__UpperCAmelCase =Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 128 , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={x.name for x in tmp_dir.iterdir()}
__UpperCAmelCase ={x.name for x in save_dir.iterdir()}
__UpperCAmelCase =save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__SCREAMING_SNAKE_CASE ) < len(__SCREAMING_SNAKE_CASE )
assert len(__SCREAMING_SNAKE_CASE ) == 1
assert len(packed_examples[0] ) == sum(len(__SCREAMING_SNAKE_CASE ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def _a ( self : str ) -> str:
if not FAIRSEQ_AVAILABLE:
return
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_dataset(max_len=64 )
__UpperCAmelCase =64
__UpperCAmelCase =ds.make_dynamic_sampler(__SCREAMING_SNAKE_CASE , required_batch_size_multiple=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =[len(__SCREAMING_SNAKE_CASE ) for x in batch_sampler]
assert len(set(__SCREAMING_SNAKE_CASE ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) # no dropped or added examples
__UpperCAmelCase =DataLoader(__SCREAMING_SNAKE_CASE , batch_sampler=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
__UpperCAmelCase =[]
__UpperCAmelCase =[]
for batch in data_loader:
__UpperCAmelCase =batch["""input_ids"""].shape
__UpperCAmelCase =src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__UpperCAmelCase =np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(__SCREAMING_SNAKE_CASE )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__SCREAMING_SNAKE_CASE )
assert num_src_per_batch[0] == max(__SCREAMING_SNAKE_CASE )
if failures:
raise AssertionError(f'''too many tokens in {len(__SCREAMING_SNAKE_CASE )} batches''' )
def _a ( self : List[Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_dataset(max_len=512 )
__UpperCAmelCase =2
__UpperCAmelCase =ds.make_sortish_sampler(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 )
__UpperCAmelCase =DataLoader(__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , collate_fn=ds.collate_fn , num_workers=2 , sampler=__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =tokenizer.pad_token_id
def count_pad_tokens(__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]="input_ids" ):
return [batch[k].eq(__SCREAMING_SNAKE_CASE ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__SCREAMING_SNAKE_CASE , k="""labels""" ) ) < sum(count_pad_tokens(__SCREAMING_SNAKE_CASE , k="""labels""" ) )
assert sum(count_pad_tokens(__SCREAMING_SNAKE_CASE ) ) < sum(count_pad_tokens(__SCREAMING_SNAKE_CASE ) )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )
def _a ( self : Any , __SCREAMING_SNAKE_CASE : Any=1000 , __SCREAMING_SNAKE_CASE : int=128 ) -> List[str]:
if os.getenv("""USE_REAL_DATA""" , __SCREAMING_SNAKE_CASE ):
__UpperCAmelCase ="""examples/seq2seq/wmt_en_ro"""
__UpperCAmelCase =max_len * 2 * 64
if not Path(__SCREAMING_SNAKE_CASE ).joinpath("""train.len""" ).exists():
save_len_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
__UpperCAmelCase ="""examples/seq2seq/test_data/wmt_en_ro"""
__UpperCAmelCase =max_len * 4
save_len_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase =SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=__SCREAMING_SNAKE_CASE , type_path="""train""" , max_source_length=__SCREAMING_SNAKE_CASE , max_target_length=__SCREAMING_SNAKE_CASE , n_obs=__SCREAMING_SNAKE_CASE , )
return ds, max_tokens, tokenizer
def _a ( self : Optional[Any] ) -> str:
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =self._get_dataset()
__UpperCAmelCase =set(DistributedSortishSampler(__SCREAMING_SNAKE_CASE , 256 , num_replicas=2 , rank=0 , add_extra_examples=__SCREAMING_SNAKE_CASE ) )
__UpperCAmelCase =set(DistributedSortishSampler(__SCREAMING_SNAKE_CASE , 256 , num_replicas=2 , rank=1 , add_extra_examples=__SCREAMING_SNAKE_CASE ) )
assert idsa.intersection(__SCREAMING_SNAKE_CASE ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
__UpperCAmelCase =AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE )
if tok_name == MBART_TINY:
__UpperCAmelCase =SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
__UpperCAmelCase =train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__UpperCAmelCase =SeqaSeqDataset(
__SCREAMING_SNAKE_CASE , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
__UpperCAmelCase =train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__SCREAMING_SNAKE_CASE ) == 1 if tok_name == BART_TINY else len(__SCREAMING_SNAKE_CASE ) == 0
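# Minimal standalone sketch of the dataset class exercised above (a sketch: the
# tokenizer checkpoint and lengths are illustrative, and the temporary data dir is
# built with the helper defined earlier in this file):
if __name__ == "__main__":
    import tempfile
    tokenizer = AutoTokenizer.from_pretrained(T5_TINY)
    data_dir = make_test_data_dir(tmp_dir=tempfile.mkdtemp())
    dataset = SeqaSeqDataset(
        tokenizer, data_dir=data_dir, type_path="train", max_source_length=16, max_target_length=16
    )
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.collate_fn)
    batch = next(iter(loader))
    print(batch["input_ids"].shape, batch["labels"].shape)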
| 68 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset ( datasets.BeamBasedBuilder ):
    """simple docstring"""
    def _info ( self ):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )
    def _split_generators ( self , dl_manager , pipeline ):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]
    def _build_pcollection ( self , pipeline , examples ):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset ( datasets.BeamBasedBuilder ):
    """simple docstring"""
    def _info ( self ):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )
    def _split_generators ( self , dl_manager , pipeline ):
        """simple docstring"""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]
    def _build_pcollection ( self , pipeline , examples ):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples ( ):
    """simple docstring"""
    return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
def get_test_nested_examples ( ):
    """simple docstring"""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )]
class BeamBuilderTest ( TestCase ):
@require_beam
    def test_download_and_prepare ( self ):
"""simple docstring"""
UpperCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase = DummyBeamDataset(cache_dir=UpperCamelCase__ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__ , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , UpperCamelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , UpperCamelCase__ )
self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
    def test_download_and_prepare_sharded ( self ):
"""simple docstring"""
import apache_beam as beam
UpperCamelCase = beam.io.parquetio.WriteToParquet
UpperCamelCase = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase = DummyBeamDataset(cache_dir=UpperCamelCase__ , beam_runner='DirectRunner' )
with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
UpperCamelCase = partial(UpperCamelCase__ , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
UpperCamelCase__ , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        UpperCamelCase__ , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
UpperCamelCase = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , UpperCamelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , UpperCamelCase__ )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
@require_beam
    def test_no_beam_options ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase = DummyBeamDataset(cache_dir=UpperCamelCase__ )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
    def test_nested_features ( self ):
"""simple docstring"""
UpperCamelCase = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCamelCase = NestedBeamDataset(cache_dir=UpperCamelCase__ , beam_runner='DirectRunner' )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(UpperCamelCase__ , builder.name , 'default' , '0.0.0' , f"""{builder.name}-train.arrow""" ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
UpperCamelCase = builder.as_dataset()
self.assertEqual(dset['train'].num_rows , UpperCamelCase__ )
self.assertEqual(dset['train'].info.splits['train'].num_examples , UpperCamelCase__ )
self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(UpperCamelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
del dset
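# Minimal standalone sketch mirroring the tests above (a sketch; requires apache-beam):
if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp_cache_dir:
        builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
        builder.download_and_prepare()
        dset = builder.as_dataset()
        print(dset["train"][0])  # expected: {'content': 'foo'}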
| 430 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order ( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(100 ).repartition(1 )
snake_case__ = Spark(snake_case__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(10 ).repartition(2 )
snake_case__ = [1, 0]
snake_case__ = _generate_iterable_examples(snake_case__ , snake_case__ ) # Reverse the partitions.
snake_case__ = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__ , snake_case__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(10 ).repartition(1 )
snake_case__ = SparkExamplesIterable(snake_case__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case__ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
snake_case__ = lambda a : x.reverse()
snake_case__ = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0] )
snake_case__ = SparkExamplesIterable(snake_case__ ).shuffle_data_sources(snake_case__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case__ ):
snake_case__ , snake_case__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
snake_case__ , snake_case__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
snake_case__ , snake_case__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows ( ):
snake_case__ = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
snake_case__ = spark.range(100 ).repartition(1 )
snake_case__ = Spark(snake_case__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
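# Minimal standalone sketch of the iterable under test (a sketch; requires pyspark):
if __name__ == "__main__":
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(6).repartition(2)
    for row_id, row in SparkExamplesIterable(df):
        print(row_id, row)  # e.g. "0_0 {'id': 0}"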
| 700 |
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
a__ = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
a__ = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence"""),
"""predictions""": datasets.Value("""string""" , id="""sequence"""),
"""references""": datasets.Value("""string""" , id="""sequence"""),
}) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare ( self , dl_manager):
        '''simple docstring'''
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da"""))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute ( self , sources , predictions , references , gpus=None , progress_bar=False):
        '''simple docstring'''
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"""src""": sources, """mt""": predictions, """ref""": references}
        data = [dict(zip(data , t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 99 | 0 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler :
    """simple docstring"""
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ) -> None:
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step ( self , *args , **kwargs ):
        """simple docstring"""
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , '''total_steps''' ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr ( self ):
        """simple docstring"""
        return self.scheduler.get_last_lr()
    def state_dict ( self ):
        """simple docstring"""
        return self.scheduler.state_dict()
    def load_state_dict ( self , state_dict ):
        """simple docstring"""
        self.scheduler.load_state_dict(state_dict )
    def get_lr ( self ):
        """simple docstring"""
        return self.scheduler.get_lr()
    def print_lr ( self , *args , **kwargs ):
        """simple docstring"""
        return self.scheduler.print_lr(*args , **kwargs )
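# Minimal usage sketch (a sketch: assumes an `accelerate` environment so that
# AcceleratorState/GradientState are usable; the optimizer/scheduler are illustrative):
if __name__ == "__main__":
    import torch
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    # step_with_optimizer=False simply forwards step() to the wrapped scheduler
    wrapped = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
    optimizer.step()
    wrapped.step()
    print(wrapped.get_last_lr())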
| 261 |
'''simple docstring'''
import numpy as np
def power_iteration ( input_matrix : np.ndarray , vector : np.ndarray , error_tol : float = 1E-12 , max_iterations : int = 100 , ):
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
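# Worked example (comment-only): for the symmetric matrix [[2, 1], [1, 2]] the
# eigenvalues are 3 and 1, so power iteration from [1, 0] converges to 3:
#
#   >>> value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   >>> round(float(value), 6)
#   3.0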
def test_power_iteration ( ):
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 261 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp ( self ):
        self.model_tester = FlaxAlbertModelTester(self )
    @slow
    def test_model_from_pretrained ( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            output = model(np.ones((1, 1) ) )
            self.assertIsNotNone(output )
@require_flax
class FlaxAlbertModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference ( self ):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 489 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
"AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"AI-Sweden/gpt-sw3-126m": 20_48,
"AI-Sweden/gpt-sw3-350m": 20_48,
"AI-Sweden/gpt-sw3-1.6b": 20_48,
"AI-Sweden/gpt-sw3-6.7b": 20_48,
"AI-Sweden/gpt-sw3-20b": 20_48,
}
class lowerCAmelCase_ ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , do_lower_case=False , remove_space=False , keep_accents=False , pad_token=None , unk_token=None , eos_token=None , bos_token=None , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        name_or_path = kwargs.get("name_or_path" )
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"
        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
        # Used for whitespace normalization in input texts
        # fmt: off
        # NOTE: upstream this set holds distinct Unicode whitespace variants; some may render identically here.
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on
        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8203] ) )}]" )
    def __getstate__( self ) -> Any:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size ( self ) -> int:
        return len(self.sp_model )
    def preprocess_text ( self , text ) -> str:
        text = self.non_printing_characters_re.sub("" , text )
        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text] )
        # NFC Unicode normalization
        text = unicodedata.normalize("NFC" , text )
        return text
    def _tokenize ( self , text , **kwargs ) -> List[str]:
        text = self.preprocess_text(text )
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id ( self , token ) -> int:
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token ( self , index ) -> str:
        return self.sp_model.IdToPiece(index )
    @staticmethod
    def clean_up_tokenization ( out_string ) -> str:
        return out_string
    def convert_tokens_to_string ( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string
    def get_vocab ( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def save_vocabulary ( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = self.preprocess_text(_lowerCAmelCase )
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase )
else:
_lowerCAmelCase = [self.preprocess_text(_lowerCAmelCase ) for t in text]
_lowerCAmelCase = self.sp_model.encode(_lowerCAmelCase )
if return_tensors is True or return_tensors == "pt":
_lowerCAmelCase = torch.tensor(_lowerCAmelCase )
return token_ids
    def decode_fast ( self , token_ids ) -> str:
        return self.sp_model.decode(token_ids )
    def _build_conversation_input_ids ( self , conversation ) -> List[int]:
        all_responses = [f'''User: {text}''' if is_user else f'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        prompt = (
            f'''{self.eos_token}{self.bos_token}''' + f'''{self.bos_token}'''.join(all_responses ) + f'''{self.bos_token}Bot:'''
        )
        return self.encode(text=prompt )
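# Minimal usage sketch (a sketch: "spiece.model" is a placeholder path to a trained
# SentencePiece model; the class name is the one defined above):
if __name__ == "__main__":
    tokenizer = lowerCAmelCase_(vocab_file="spiece.model")
    ids = tokenizer("Exempel på en mening")["input_ids"]
    print(tokenizer.decode(ids))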
| 489 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys ( state_dict , encoder_only=False ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head" ):
            key = "segformer.encoder." + key
        if key.startswith("backbone" ):
            key = key.replace("backbone" , "segformer.encoder" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(idx )-1}' )
        if "norm" in key:
            key = key.replace("norm" , "layer_norm" )
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
            key = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(idx )-1}' )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1" , "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2" , "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f'block{idx}' , f'block.{int(idx )-1}' )
        if "attn.q" in key:
            key = key.replace("attn.q" , "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj" , "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn" , "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1" , "dense1" )
        if "fc2" in key:
            key = key.replace("fc2" , "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred" , "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv" , "linear_fuse" )
            key = key.replace("linear_fuse.bn" , "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f'linear_c{idx}' , f'linear_c.{int(idx )-1}' )
        if key.startswith("head" ):
            key = key.replace("head" , "classifier" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v ( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
            kv_bias = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
            # next, add keys and values (in that order) to the state dict
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.weight'] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.key.bias'] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.weight'] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f'segformer.encoder.block.{i}.{j}.attention.self.value.bias'] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    """Load the COCO image on which the converted model is verified."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int , SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
A__ = SegformerConfig()
A__ = False
# set attributes based on model_name
A__ = "huggingface/label-files"
if "segformer" in model_name:
A__ = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
A__ = 1_5_0
A__ = "ade20k-id2label.json"
A__ = (1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
A__ = 1_9
A__ = "cityscapes-id2label.json"
A__ = (1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
A__ = True
A__ = model_name[4:6]
A__ = 1_0_0_0
A__ = "imagenet-1k-id2label.json"
A__ = (1, 1_0_0_0)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
A__ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
    A__ = {int(k): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
A__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
A__ = 2_5_6
elif size == "b2":
A__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
A__ = 7_6_8
A__ = [3, 4, 6, 3]
elif size == "b3":
A__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
A__ = 7_6_8
A__ = [3, 4, 1_8, 3]
elif size == "b4":
A__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
A__ = 7_6_8
A__ = [3, 8, 2_7, 3]
elif size == "b5":
A__ = [6_4, 1_2_8, 3_2_0, 5_1_2]
A__ = 7_6_8
A__ = [3, 6, 4_0, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
    A__ = SegformerImageProcessor(
        image_scale=(5_1_2, 5_1_2) , keep_ratio=False , align=False , do_random_crop=False )
# prepare image
A__ = prepare_img()
A__ = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location=torch.device("cpu" ) )
else:
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
A__ = rename_keys(SCREAMING_SNAKE_CASE_ , encoder_only=SCREAMING_SNAKE_CASE_ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# create HuggingFace model and load state dict
if encoder_only:
A__ = False
A__ = SegformerForImageClassification(SCREAMING_SNAKE_CASE_ )
else:
A__ = SegformerForSemanticSegmentation(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
# forward pass
A__ = model(SCREAMING_SNAKE_CASE_ )
A__ = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
A__ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
A__ = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
A__ = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
A__ = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
A__ = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
A__ = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
A__ = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
A__ = torch.tensor(
[
[
[-1.1_372e01, -1.2_787e01, -1.3_477e01],
[-1.2_536e01, -1.4_194e01, -1.4_409e01],
[-1.3_217e01, -1.4_888e01, -1.5_327e01],
],
[
[-1.4_791e01, -1.7_122e01, -1.8_277e01],
[-1.7_163e01, -1.9_192e01, -1.9_533e01],
[-1.7_897e01, -1.9_991e01, -2.0_315e01],
],
[
[7.6_723e-01, 4.1_921e-01, -7.7_878e-02],
[4.7_772e-01, 9.5_557e-03, -2.8_082e-01],
[3.6_032e-01, -2.4_826e-01, -5.1_168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
A__ = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
A__ = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""segformer.b0.512x512.ade.160k""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCAmelCase__ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
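# Example invocation (illustrative; the script name and the paths are placeholders):
# python convert_segformer_original_to_pytorch.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#     --pytorch_dump_folder_path ./segformer-b0-finetuned-ade-512-512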
| 514 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
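# Sketch of what the _LazyModule indirection buys (illustrative and simplified;
# not part of the original file): submodules are imported only on first
# attribute access, via a PEP 562 style module-level __getattr__.
# import importlib
# def __getattr__(name):
#     for submodule, names in _import_structure.items():
#         if name in names:
#             return getattr(importlib.import_module(f".{submodule}", __name__), name)
#     raise AttributeError(name)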
| 514 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["speech"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""speech"""] )
| 704 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = ShapEPipeline
_UpperCamelCase : Optional[Any] = ["prompt"]
_UpperCamelCase : Tuple = ["prompt"]
_UpperCamelCase : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
_UpperCamelCase : str = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 8
@property
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(a__ )
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
_lowerCAmelCase : Any = PriorTransformer(**a__ )
return model
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : Tuple = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase : Dict = ShapERenderer(**a__ )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_prior
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : Dict = self.dummy_renderer
_lowerCAmelCase : List[Any] = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=a__ , clip_sample=a__ , clip_sample_range=1.0 , )
_lowerCAmelCase : List[Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def __A ( self , a__ , a__=0 ):
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[str] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Union[str, Any] = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Dict = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : List[Any] = """cpu"""
_lowerCAmelCase : List[Any] = self.get_dummy_components()
_lowerCAmelCase : str = self.pipeline_class(**a__ )
_lowerCAmelCase : List[Any] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[str] = output.images[0]
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase : Union[str, Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self ):
_lowerCAmelCase : Any = torch_device == """cpu"""
_lowerCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a__ , relax_max_difference=a__ , )
def __A ( self ):
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : Optional[Any] = self.pipeline_class(**a__ )
_lowerCAmelCase : int = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = 1
_lowerCAmelCase : Optional[Any] = 2
_lowerCAmelCase : List[Any] = self.get_dummy_inputs(a__ )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase : str = batch_size * [inputs[key]]
_lowerCAmelCase : Tuple = pipe(**a__ , num_images_per_prompt=a__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
_lowerCAmelCase : Union[str, Any] = ShapEPipeline.from_pretrained("""openai/shap-e""" )
_lowerCAmelCase : Tuple = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = torch.Generator(device=a__ ).manual_seed(0 )
_lowerCAmelCase : Any = pipe(
"""a shark""" , generator=a__ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a__ , a__ )
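# Hedged usage sketch mirroring the slow test above (needs a GPU and the real
# "openai/shap-e" weights, so it is kept as comments here):
# pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
# images = pipe("a shark", generator=generator, guidance_scale=15.0,
#               num_inference_steps=64, frame_size=64, output_type="np").images[0]
# -> array of shape (20, 64, 64, 3)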
| 663 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = {
'''do_resize''': True,
'''size''': {'''height''': 224, '''width''': 224},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
__SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCAmelCase__ ( self : int , **_A : Any ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : Optional[int] , **_A : Optional[Any] ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : Union[str, Any] , **_A : Optional[int] ):
"""simple docstring"""
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A )
__SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _A )
self.assertIsInstance(processor_fast.tokenizer , _A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _A )
self.assertIsInstance(processor_fast.image_processor , _A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor(do_normalize=_A )
__SCREAMING_SNAKE_CASE : str = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(_A , return_tensors='''np''' )
__SCREAMING_SNAKE_CASE : List[str] = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : str = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : List[Any] = processor(text=_A )
__SCREAMING_SNAKE_CASE : str = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : int = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_A ):
processor()
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : str = processor.batch_decode(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A )
__SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。'''
__SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Optional[int] = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
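# Hedged usage sketch of the processor exercised above (the checkpoint name is
# an assumption, not taken from this file; kept as comments so the test module
# stays download-free):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# inputs = processor(text="Alexandra,T-shirt的价格是15便士。", images=image, return_tensors="pt")
# -> keys: input_ids, token_type_ids, attention_mask, pixel_values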
| 74 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.866_0254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with the four segments of the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by `angle_in_degrees`."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
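    # Sanity check (not in the original): each iteration replaces every segment
    # with 4, so the 3-segment triangle yields 3 * 4**5 segments plus the final
    # endpoint after 5 iterations.
    assert len(processed_vectors) == 3 * 4**5 + 1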
| 464 | 0 |
import math
def sieve(n):
    """Segmented sieve: return all primes <= n, sieving one sqrt(n)-sized window at a time."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    prime = []

    # plain sieve of Eratosthenes on [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # sieve the rest of the range window by window, reusing the base primes
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each  # first multiple of `each` at or below low
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
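# Quick cross-check against straightforward trial division (not in the original file):
def _is_prime(k):
    return k >= 2 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))

assert sieve(100) == [k for k in range(101) if _is_prime(k)]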
| 143 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class __lowercase ( a__ ):
def __init__( self : List[Any] , *lowercase__ : Tuple , **lowercase__ : List[Any] ):
        warnings.warn(
            '''The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use PoolFormerImageProcessor instead.''' , FutureWarning , )
        super().__init__(*lowercase__ , **lowercase__ )
| 143 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string):
        """Parse the literal strings "True"/"False" used by the optional CLI overrides below."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f'could not parse string as bool {string}' )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
lowerCAmelCase = parser.parse_args()
lowerCAmelCase = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
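# Example invocation (illustrative; the script name and the paths are placeholders):
# python convert_original_controlnet_to_diffusers.py \
#     --checkpoint_path ./control_sd15_canny.pth \
#     --original_config_file ./cldm_v15.yaml \
#     --dump_path ./controlnet-canny --to_safetensors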
| 43 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_0_0_0, beta_min=0.1, beta_max=2_0, sampling_eps=1E-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                """`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean | 334 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class a__ :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int=1_3 , UpperCAmelCase__ : Tuple=7 , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : int=9_9 , UpperCAmelCase__ : str=3_2 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : Optional[Any]=3_7 , UpperCAmelCase__ : List[str]="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Union[str, Any]=5_1_2 , UpperCAmelCase__ : Optional[Any]=1_6 , UpperCAmelCase__ : Optional[int]=2 , UpperCAmelCase__ : Optional[Any]=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : int=None , ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = parent
SCREAMING_SNAKE_CASE : Tuple = 1_3
SCREAMING_SNAKE_CASE : Dict = 7
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Tuple = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : str = 9_9
SCREAMING_SNAKE_CASE : Optional[Any] = 3_8_4
SCREAMING_SNAKE_CASE : Optional[int] = 2
SCREAMING_SNAKE_CASE : str = 4
SCREAMING_SNAKE_CASE : str = 3_7
SCREAMING_SNAKE_CASE : List[Any] = """gelu"""
SCREAMING_SNAKE_CASE : Dict = 0.1
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1
SCREAMING_SNAKE_CASE : Dict = 5_1_2
SCREAMING_SNAKE_CASE : int = 1_6
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : str = 0.02
SCREAMING_SNAKE_CASE : Optional[int] = 3
SCREAMING_SNAKE_CASE : Tuple = 4
SCREAMING_SNAKE_CASE : Any = 1_2_8
SCREAMING_SNAKE_CASE : List[Any] = 2
SCREAMING_SNAKE_CASE : str = 9
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : Optional[Any] = None
def _lowercase ( self : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : str = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : int = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
if self.use_labels:
SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TFConvBertModel(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
SCREAMING_SNAKE_CASE : str = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TFConvBertForMaskedLM(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = TFConvBertForSequenceClassification(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) ->Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_choices
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertForMultipleChoice(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Optional[int] = TFConvBertForTokenClassification(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE : Any = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = TFConvBertForQuestionAnswering(config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : int ) ->List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_tf
class a__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : List[str] =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Dict =False
UpperCAmelCase__ : Union[str, Any] =False
UpperCAmelCase__ : List[str] =False
def _lowercase ( self : int ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = TFConvBertModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self : str ) ->int:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def _lowercase ( self : List[Any] ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def _lowercase ( self : Any ) ->Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def _lowercase ( self : Optional[int] ) ->Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def _lowercase ( self : Any ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : int = True
SCREAMING_SNAKE_CASE : Dict = True
if hasattr(UpperCAmelCase__ , """use_cache""" ):
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(self.model_tester , """key_length""" , UpperCAmelCase__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : int = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = len(model(UpperCAmelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase__ , saved_model=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(UpperCAmelCase__ , """saved_model""" , """1""" )
SCREAMING_SNAKE_CASE : int = tf.keras.models.load_model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = model(UpperCAmelCase__ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : int = outputs["""encoder_hidden_states"""]
SCREAMING_SNAKE_CASE : Tuple = outputs["""encoder_attentions"""]
else:
SCREAMING_SNAKE_CASE : str = outputs["""hidden_states"""]
SCREAMING_SNAKE_CASE : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self : List[str] ) ->Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCAmelCase__ )
def _lowercase ( self : Any ) ->int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE : List[Any] = True
SCREAMING_SNAKE_CASE : Dict = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
SCREAMING_SNAKE_CASE : int = getattr(self.model_tester , """key_length""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = getattr(self.model_tester , """key_length""" , UpperCAmelCase__ )
def check_decoder_attentions_output(UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE : str = len(UpperCAmelCase__ )
self.assertEqual(out_len % 2 , 0 )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase__ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : str = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE : List[Any] = len(UpperCAmelCase__ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
if self.is_encoder_decoder:
SCREAMING_SNAKE_CASE : List[Any] = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_decoder_attentions_output(UpperCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Optional[int] = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
@require_tf
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Tuple ) ->List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 6, 7_6_8]
self.assertEqual(output.shape , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Any = tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4 )
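# Hedged usage sketch mirroring the integration test above (downloads the real
# "YituTech/conv-bert-base" checkpoint, so it is kept as comments):
# model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
# output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768)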
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase__ : str = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[str] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 446 | 0 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit), computed bottom-up."""
    if upper_limit < 0:
        raise ValueError("""Limit for the Catalan sequence must be ≥ 0""" )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
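# Cross-check helper (not in the original): the n-th Catalan number also has the
# closed form C(2n, n) / (n + 1); math.comb keeps the arithmetic in exact integers.
def catalan_closed_form(n: int) -> int:
    import math

    return math.comb(2 * n, n) // (n + 1)


assert catalan_numbers(10)[-1] == catalan_closed_form(10) == 16796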
if __name__ == "__main__":
print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
print("\n*** Enter -1 at any time to quit ***")
print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
try:
while True:
            N = int(input().strip())
if N < 0:
print("\n********* Goodbye!! ************")
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("Try another upper limit for the sequence: ", end="")
except (NameError, ValueError):
print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod()
| 641 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
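# Sketch (illustrative, not part of accelerate itself) of mounting this command
# under a parent CLI via the `subparsers` path above; the program name
# "accelerate" here is only an example.
def _build_cli() -> argparse.ArgumentParser:
    root = argparse.ArgumentParser("accelerate")
    subcommands = root.add_subparsers()
    test_command_parser(subparsers=subcommands)
    return root


# e.g. args = _build_cli().parse_args(["test"]); args.func(args)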
if __name__ == "__main__":
main()
| 641 | 1 |
"""Change the brightness of a PIL image by a fixed level."""
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift every pixel value by `level`; PIL clips the result to the 0-255 range."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
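# Self-contained demo (an illustrative sketch; avoids needing an image file on
# disk): brighten a tiny grayscale gradient and inspect the shifted values.
def _demo() -> None:
    gradient = Image.new("L", (4, 1))
    gradient.putdata([0, 64, 128, 192])
    brightened = change_brightness(gradient, 50)
    print(list(brightened.getdata()))  # [50, 114, 178, 242]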
| 428 |
"""Tests for pushing image processors to the Hugging Face Hub."""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 428 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    """Constructs a MobileViT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        # Resize so that the shortest edge matches `size["shortest_edge"]`.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
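# Usage sketch (illustrative; assumes the reconstructed class above runs inside
# the transformers package so the relative imports resolve):
#
#     processor = MobileViTImageProcessor()
#     image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 256, 256): resize + center crop, CHW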
| 230 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_dataset(A__ , A__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> str:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ , split=A__ ).read()
_check_json_dataset(A__ , A__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def A__ ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
if issubclass(A__ , A__ ):
_UpperCAmelCase = jsonl_path
elif issubclass(A__ , A__ ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_dataset(A__ , A__ )
def A__ ( A__ , A__ , A__=("train",) ) -> List[str]:
'''simple docstring'''
assert isinstance(A__ , A__ )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def A__ ( A__ , A__ , A__ ) -> Any:
'''simple docstring'''
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"train": jsonl_path} , cache_dir=A__ , keep_in_memory=A__ ).read()
_check_json_datasetdict(A__ , A__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def A__ ( A__ , A__ , A__ ) -> Optional[Any]:
'''simple docstring'''
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = "train"
_UpperCAmelCase = {"train": jsonl_path, "test": jsonl_path}
_UpperCAmelCase = tmp_path / "cache"
_UpperCAmelCase = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_UpperCAmelCase = JsonDatasetReader(A__ , cache_dir=A__ ).read()
_check_json_datasetdict(A__ , A__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
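# Round-trip sketch (illustrative, not part of the test suite): write a small
# in-memory Dataset to JSON lines and read it back with the helper above.
def _json_roundtrip_example():
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    with io.BytesIO() as buffer:
        JsonDatasetWriter(ds, buffer, lines=True).write()
        buffer.seek(0)
        rows = load_json_lines(buffer)
    assert rows == [
        {"col_1": "a", "col_2": 1, "col_3": 1.0},
        {"col_1": "b", "col_2": 2, "col_3": 2.0},
    ]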
| 426 | 0 |
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 59 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 | 1 |