| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""PyTorch MobileNetV1 model."""

from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_v1 import MobileNetV1Config
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TF variable names to PyTorch parameters/buffers."""
    tf_to_pt_map = {}

    if isinstance(model, MobileNetV1ForImageClassification):
        backbone = model.mobilenet_v1
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetV1ForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_v1(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoints into a PyTorch model."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to a convolution layer."""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetV1ConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetV1Config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation: Union[bool, str] = True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetV1PreTrainedModel(PreTrainedModel):
    config_class = MobileNetV1Config
    load_tf_weights = load_tf_weights_in_mobilenet_v1
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.",
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1Model(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

        self.conv_stem = MobileNetV1ConvLayer(
            config, in_channels=config.num_channels, out_channels=out_channels, kernel_size=3, stride=2
        )

        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels

            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # depthwise 3x3 convolution
            self.layer.append(
                MobileNetV1ConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                )
            )

            # pointwise 1x1 convolution
            self.layer.append(
                MobileNetV1ConvLayer(config, in_channels=in_channels, out_channels=out_channels, kernel_size=1)
            )

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=all_hidden_states
        )
@add_start_docstrings(
    """
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetV1ForImageClassification(MobileNetV1PreTrainedModel):
    def __init__(self, config: MobileNetV1Config) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_v1 = MobileNetV1Model(config)

        last_hidden_size = self.mobilenet_v1.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_v1(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)

| code_codestyle: 585 |
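# A minimal usage sketch for the MobileNetV1 classifier defined above; a sketch only,
# assuming a transformers release that ships MobileNetV1, plus PIL and network access.
import requests
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # "tabby, tabby cat" per the docstring constant above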
"""Bucket sort: bin values by their integer offset from the minimum, sort each bucket, concatenate."""

from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]

| style_context_codestyle: 585 | label: 1 |
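# A quick behavioural sketch for the bucket_sort above: values are binned by their integer
# offset from the minimum, so floats work too (they share integer-width buckets):
#
#     >>> bucket_sort([0.4, 1.2, -0.5, 0.9])
#     [-0.5, 0.4, 0.9, 1.2]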
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __lowercase , unittest.TestCase ):
_a = ReformerTokenizer
_a = ReformerTokenizerFast
_a = True
_a = False
_a = True
def __lowercase ( self : str ):
super().setUp()
lowerCAmelCase = ReformerTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self : List[str] ):
lowerCAmelCase = "<s>"
lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def __lowercase ( self : int ):
lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowerCAmelCase ) , 1000 )
def __lowercase ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # This tokenizer does not know some characters like ")".
        # That is the reason why we use very simple texts here.
        # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
            "This is a very simple sentence.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
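
if __name__ == "__main__":
    # Round-trip demo sketch of what the slow tests above encode; assumes network access
    # to the google/reformer-crime-and-punishment checkpoint.
    tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    ids = tok.encode("Hello World!")
    print(ids)              # [126, 32, 262, 152, 38, 72, 287], as in test_tokenization_base_easy_symbols
    print(tok.decode(ids))  # back to "Hello World!"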
| code_codestyle: 706 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def lowercase (snake_case__ : str , snake_case__ : Tuple , snake_case__ : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase = os.path.abspath(snake_case__ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
lowerCAmelCase = tf.train.list_variables(snake_case__ )
lowerCAmelCase = []
lowerCAmelCase = []
lowerCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowerCAmelCase = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
lowerCAmelCase = name[1:]
# figure out how many levels deep the name is
lowerCAmelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(snake_case__ )
# read data
lowerCAmelCase = tf.train.load_variable(snake_case__ , snake_case__ )
names.append("""/""".join(snake_case__ ) )
arrays.append(snake_case__ )
logger.info(f'''Read a total of {len(snake_case__ ):,} layers''' )
# Sanity check
if len(set(snake_case__ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(snake_case__ ) )})''' )
lowerCAmelCase = list(set(snake_case__ ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
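    # The converter can also be driven programmatically rather than via argparse; a sketch
    # with hypothetical placeholder paths, not real files:
    #
    #     convert_tf2_checkpoint_to_pytorch(
    #         tf_checkpoint_path="/tmp/bert_tf2/bert_model.ckpt",
    #         config_path="/tmp/bert_tf2/bert_config.json",
    #         pytorch_dump_path="/tmp/bert_pt/pytorch_model.bin",
    #     )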
| style_context_codestyle: 529 | label: 0 |
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
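
if __name__ == "__main__":
    # Demo sketch, not part of the test suite: pair text with audio through the processor.
    # Assumes network access to the laion/clap-htsat-unfused checkpoint; the audio array
    # is purely illustrative (one second of silence at 48 kHz).
    import numpy as np

    demo_processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    demo_audio = np.zeros(48_000, dtype=np.float32)
    batch = demo_processor(text=["a dog barking"], audios=[demo_audio], sampling_rate=48_000, return_tensors="pt")
    print(sorted(batch.keys()))  # tokenizer fields plus the feature extractor's input_features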
| code_codestyle: 548 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def UpperCAmelCase_ ( __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : int = DPTConfig()
if "large" in checkpoint_url:
_lowerCamelCase : Optional[Any] = 10_24
_lowerCamelCase : List[str] = 40_96
_lowerCamelCase : Union[str, Any] = 24
_lowerCamelCase : Any = 16
_lowerCamelCase : Union[str, Any] = [5, 11, 17, 23]
_lowerCamelCase : Optional[int] = [2_56, 5_12, 10_24, 10_24]
_lowerCamelCase : List[str] = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
_lowerCamelCase : Any = True
_lowerCamelCase : List[str] = 1_50
_lowerCamelCase : int = 'huggingface/label-files'
_lowerCamelCase : Union[str, Any] = 'ade20k-id2label.json'
_lowerCamelCase : Union[str, Any] = json.load(open(cached_download(hf_hub_url(__a , __a , repo_type='dataset' ) ) , 'r' ) )
_lowerCamelCase : Optional[Any] = {int(__a ): v for k, v in idalabel.items()}
_lowerCamelCase : int = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
_lowerCamelCase : List[str] = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    """Copy/paste/tweak the original DPT weights into our DPT structure."""
    # define DPT configuration based on URL
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
a_ = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
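    # A programmatic invocation sketch (it downloads the checkpoint and a test image;
    # the output directory is a hypothetical placeholder):
    #
    #     convert_dpt_checkpoint(
    #         checkpoint_url="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
    #         pytorch_dump_folder_path="/tmp/dpt-large",
    #         push_to_hub=False,
    #         model_name="dpt-large",
    #     )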
| style_context_codestyle: 437 | label: 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]


if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
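# What the lazy structure buys, as a sketch: `import transformers` stays cheap, and the
# heavy modeling module only loads on first attribute access (assumes torch installed):
#
#     from transformers import CanineConfig, CanineModel, CanineTokenizer  # resolved lazily
#     model = CanineModel(CanineConfig())  # randomly initialized
#     tokenizer = CanineTokenizer()        # character-level, needs no vocab file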
| code_codestyle: 233 |
"""Fetch information about the authenticated GitHub user via the REST API."""

from __future__ import annotations

import os
from typing import Any

import requests

BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of a user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(f"{key}: {value}")
    else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
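    # Direct-call sketch (the token is a placeholder, not a real credential):
    #
    #     user = fetch_github_info("ghp_XXXXXXXXXXXXXXXX")
    #     print(user.get("login"), user.get("public_repos"))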
| style_context_codestyle: 233 | label: 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = ["""flax""", """transformers"""]
def __init__( self , *_a , **_a ) -> List[str]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self , *_a , **_a ) -> Dict:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[str] = ["""flax""", """transformers"""]
def __init__( self , *_a , **_a ) -> Tuple:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
class __a (metaclass=UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :List[Any] = ["""flax""", """transformers"""]
def __init__( self , *_a , **_a ) -> Optional[int]:
"""simple docstring"""
requires_backends(self , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
@classmethod
def _a ( cls , *_a , **_a ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""flax""", """transformers"""] )
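
# These dummies exist so `import diffusers` succeeds without flax/transformers installed;
# touching one fails fast. A behaviour sketch (assumes flax is absent):
#
#     try:
#         FlaxStableDiffusionPipeline()
#     except ImportError as err:
#         print(err)  # tells the user to install flax and transformers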
| code_codestyle: 680 |
"""simple docstring"""
import numpy as np
import qiskit
def _lowercase ( __lowerCAmelCase = 8 , __lowerCAmelCase = None ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] = np.random.default_rng(seed=__lowerCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
SCREAMING_SNAKE_CASE__ : List[str] = 6 * key_len
# Measurement basis for Alice's qubits.
SCREAMING_SNAKE_CASE__ : List[Any] = rng.integers(2 , size=__lowerCAmelCase )
# The set of states Alice will prepare.
SCREAMING_SNAKE_CASE__ : Optional[Any] = rng.integers(2 , size=__lowerCAmelCase )
# Measurement basis for Bob's qubits.
SCREAMING_SNAKE_CASE__ : str = rng.integers(2 , size=__lowerCAmelCase )
# Quantum Circuit to simulate BB84
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.QuantumCircuit(__lowerCAmelCase , name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(__lowerCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(__lowerCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(__lowerCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(__lowerCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(__lowerCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
SCREAMING_SNAKE_CASE__ : str = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
SCREAMING_SNAKE_CASE__ : Optional[int] = qiskit.execute(__lowerCAmelCase , __lowerCAmelCase , shots=1 , seed_simulator=__lowerCAmelCase )
# Returns the result of measurement.
SCREAMING_SNAKE_CASE__ : int = job.result().get_counts(__lowerCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
SCREAMING_SNAKE_CASE__ : Optional[Any] = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
SCREAMING_SNAKE_CASE__ : Optional[int] = gen_key[:key_len] if len(__lowerCAmelCase ) >= key_len else gen_key.ljust(__lowerCAmelCase , """0""" )
return key
if __name__ == "__main__":
print(f'The generated key is : {bbaa(8, seed=0)}')
from doctest import testmod
testmod()
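    # Because the simulator is seeded, a run is reproducible; a sketch (assumes qiskit
    # with the Aer provider installed):
    assert bb84(8, seed=0) == bb84(8, seed=0)  # same seed, same key
    print(len(bb84(16, seed=3)))  # 16: keys are truncated or zero-padded to key_len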
| style_context_codestyle: 680 | label: 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

from filelock import FileLock

from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available


logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names match the corresponding model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Loads a data file into a list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
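# A minimal sketch of a concrete TokenClassificationTask, assuming a
# CoNLL-style layout ("token label" per line, blank line between sentences).
# The class and file names below are illustrative, not part of the original module.
class SketchNERTask(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        mode_value = mode.value if isinstance(mode, Split) else mode
        examples, words, labels = [], [], []
        with open(os.path.join(data_dir, f"{mode_value}.txt"), encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line and words:
                    examples.append(InputExample(guid=f"{mode_value}-{len(examples)}", words=words, labels=labels))
                    words, labels = [], []
                elif line:
                    parts = line.split()
                    words.append(parts[0])
                    labels.append(parts[-1])
        if words:
            examples.append(InputExample(guid=f"{mode_value}-{len(examples)}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path: str) -> List[str]:
        with open(path, encoding="utf-8") as f:
            return [label.strip() for label in f if label.strip()]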
| 262 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
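# For illustration only: the _LazyModule above defers the heavy torch imports
# until an attribute is first accessed. A stripped-down version of the same
# idea (assumed names, not the transformers implementation) looks like this:
#
#     import importlib, types
#
#     class TinyLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, names in self._import_structure.items():
#                 if attr in names:
#                     module = importlib.import_module(f"{self.__name__}.{submodule}")
#                     return getattr(module, attr)
#             raise AttributeError(attr)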
| 262 | 1 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(taken: int = 20) -> str:
    """Expected number of distinct colours when `taken` balls are drawn."""
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
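# Quick Monte Carlo sanity check for the closed form above (illustrative, not
# part of the original solution): draw `taken` balls without replacement and
# count distinct colours; the average should approach float(solution(taken)).
def _simulate(trials: int = 10_000, taken: int = 20) -> float:
    import random

    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    total = 0
    for _ in range(trials):
        total += len(set(random.sample(urn, taken)))
    return total / trials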
| 66 |
"""simple docstring"""
MOD_ADLER = 65521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of a (byte-per-character) string."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
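# Sanity check (illustrative): for text whose characters fit in one byte, this
# matches zlib's reference implementation, which also starts from a=1, b=0.
if __name__ == "__main__":
    import zlib

    text = "Adler-32"
    assert adler32(text) == zlib.adler32(text.encode("latin-1"))
    print(f"adler32({text!r}) = {adler32(text):#010x}")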
| 389 | 0 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: 1-based line number with the greatest base**exponent."""
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(os.path.realpath(__file__)), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1

    return result
if __name__ == "__main__":
print(solution())
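# Why logarithms: base**exp itself is astronomically large for the puzzle data,
# but the ordering of base**exp values is the same as the ordering of
# exp * log10(base). A tiny illustration (not part of the original solution):
if __name__ == "__main__":
    # 2**11 = 2048 and 3**7 = 2187, so both comparisons are False here.
    assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)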
| 701 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True

        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 149 | 0 |
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 380 |
def binary_insertion_sort(collection: list) -> list:
    """Sort `collection` in place, using binary search to find each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
A__: Dict = input('''Enter numbers separated by a comma:\n''').strip()
A__: Optional[int] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
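# Property check (illustrative): binary insertion sort agrees with sorted() on
# random inputs. Binary search cuts comparisons to O(n log n), but element
# moves keep the overall worst case at O(n^2).
def _self_test(runs: int = 100) -> None:
    import random

    for _ in range(runs):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 50))]
        assert binary_insertion_sort(list(data)) == sorted(data)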
| 380 | 1 |
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) over 2 <= n <= limit, using a prime sieve."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F'''{solution() = }''')
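# Cross-check (illustrative): summing phi(n) for 2 <= n <= limit counts the
# reduced proper fractions n/d with d <= limit (Project Euler 72), so a brute
# force over gcd should agree for small limits, e.g. _brute_force(100) == solution(100).
def _brute_force(limit: int = 100) -> int:
    from math import gcd

    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)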
| 28 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 28 | 1 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random (Erdos-Renyi style) graph as an adjacency dict."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate the complete graph on `vertices_number` vertices."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
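# Example usage (illustrative): a small undirected random graph, and the
# degenerate case where probability >= 1 yields the complete graph.
if __name__ == "__main__":
    random.seed(1)
    print(random_graph(4, 0.5))  # undirected: every edge appears in both adjacency lists
    print(random_graph(4, 1))    # complete graph on 4 vertices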
| 476 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a full binary game tree."""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
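# Worked example (illustrative): for a two-level tree with leaves [3, 5, 2, 9]
# the maximiser moves first and the minimiser second, so the optimal value is
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
def _tiny_example() -> int:
    scores = [3, 5, 2, 9]
    height = math.log(len(scores), 2)  # two levels below the root
    return minimax(0, 0, True, scores, height)  # returns 3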
| 476 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = """\
@misc{wu2016googles,
    title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
    author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
            and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
            Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
            Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
            Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
            and Jeffrey Dean},
    year={2016},
    eprint={1609.08144},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
_KWARGS_DESCRIPTION = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.

Args:
    predictions (list of str): list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references (list of list of str): list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
    max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.

Returns:
    'google_bleu': google_bleu score

Examples:
    Example 1:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.44

    Example 2:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
        >>> print(round(results["google_bleu"], 2))
        0.61

    Example 3:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
        >>> print(round(results["google_bleu"], 2))
        0.53

    Example 4:
        >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...         'ensures', 'that', 'the', 'rubber', 'duck', 'always',
        ...         'disobeys', 'the', 'commands', 'of', 'the', 'cat']
        >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...          'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
        ...          'being', 'under', 'the', 'command', 'of', 'the', 'cat']
        >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...          'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
        ...          'heed', 'the', 'cat', 'commands']
        >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...          'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
        ...          'of', 'the', 'cat']

        >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
        ...         'interested', 'in', 'world', 'history']
        >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
        ...          'because', 'he', 'read', 'the', 'book']

        >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
        >>> hypotheses = [hyp1, hyp2]
        >>> google_bleu = datasets.load_metric("google_bleu")
        >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
        >>> print(round(results["google_bleu"], 2))
        0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[List[str]]],
        references: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 650 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 650 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
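# Usage sketch (illustrative, assuming network access to the Hugging Face Hub):
#
#     tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#     inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#     # decoding generated ids goes through the generator tokenizer:
#     # tokenizer.batch_decode(generated_ids, skip_special_tokens=True)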
| 279 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    """Project Euler 33: product of denominators over numerators of the special fractions."""
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
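# The classic example of a digit-cancelling fraction (illustrative check):
# 49/98 equals 4/8 after "cancelling" the 9s, and the predicate above agrees,
# while trivial fractions like 30/50 are rejected.
if __name__ == "__main__":
    assert is_digit_cancelling(49, 98)
    assert not is_digit_cancelling(30, 50)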
| 227 | 0 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps a FLAVA image processor and a BERT tokenizer into a single processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 516 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 516 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
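# Instantiation sketch (illustrative): with 12 encoder layers and 3 sparse
# encoder layers, every 4th encoder block becomes a sparse MoE block, which is
# what the `encoder_sparse_step` computed above encodes.
if __name__ == "__main__":
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    print(config.encoder_sparse_step)  # 4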
| 88 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 61 | 0 |
from ..utils import DummyObject, requires_backends
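# Placeholder ("dummy") classes for environments without PyTorch installed: the
# DummyObject metaclass plus requires_backends make instantiation and the
# classmethod constructors raise an informative error instead of an ImportError.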
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : int ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Tuple ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : int ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Tuple ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Any ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : str ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Dict ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
# Module-level dummy helper functions: each simply raises an informative error
# when called without torch installed.
def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])


def snake_case(*args, **kwargs):
    requires_backends(snake_case, ["torch"])
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : str ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : str ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Tuple ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : int ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Any ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[str] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : str ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Dict ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : int ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Any ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : int ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Dict ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Dict ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Any ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : str ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Dict ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[Any] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : str ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[str] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Dict ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[str] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : List[str] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : int ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : str ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : str ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : str ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : int ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Dict ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Dict ) -> str:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Any:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[str] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Any ) -> Tuple:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Tuple ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Union[str, Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : List[Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : str ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[str] ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Any ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[int] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : List[Any] ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Dict:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : Dict ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : List[Any] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Optional[Any] ,**_SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : int ,*_SCREAMING_SNAKE_CASE : Dict ,**_SCREAMING_SNAKE_CASE : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Optional[Any] ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Optional[int] ,*_SCREAMING_SNAKE_CASE : List[str] ,**_SCREAMING_SNAKE_CASE : str ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Any:
'''simple docstring'''
requires_backends(cls ,['torch'] )
class UpperCamelCase ( metaclass=snake_case__ ):
"""simple docstring"""
snake_case = ["torch"]
def __init__( self : Union[str, Any] ,*_SCREAMING_SNAKE_CASE : Any ,**_SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self ,['torch'] )
@classmethod
def A( cls : Any ,*_SCREAMING_SNAKE_CASE : str ,**_SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
@classmethod
def A( cls : str ,*_SCREAMING_SNAKE_CASE : Tuple ,**_SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls ,['torch'] )
| 110 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'''Job {i:>2} is {job[0]} at {job[1]}''')
| 110 | 1 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case , ) -> Dict:
super().__init__()
_UpperCAmelCase = value_function
_UpperCAmelCase = unet
_UpperCAmelCase = scheduler
_UpperCAmelCase = env
_UpperCAmelCase = env.get_dataset()
_UpperCAmelCase = {}
for key in self.data.keys():
try:
_UpperCAmelCase = self.data[key].mean()
except: # noqa: E722
pass
_UpperCAmelCase = {}
for key in self.data.keys():
try:
_UpperCAmelCase = self.data[key].std()
except: # noqa: E722
pass
_UpperCAmelCase = env.observation_space.shape[0]
_UpperCAmelCase = env.action_space.shape[0]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
return (x_in - self.means[key]) / self.stds[key]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]:
return x_in * self.stds[key] + self.means[key]
def lowerCamelCase_ ( self , snake_case ) -> Any:
if type(snake_case ) is dict:
return {k: self.to_torch(snake_case ) for k, v in x_in.items()}
elif torch.is_tensor(snake_case ):
return x_in.to(self.unet.device )
return torch.tensor(snake_case , device=self.unet.device )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> int:
for key, val in cond.items():
_UpperCAmelCase = val.clone()
return x_in
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = x.shape[0]
_UpperCAmelCase = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_UpperCAmelCase = torch.full((batch_size,) , snake_case , device=self.unet.device , dtype=torch.long )
for _ in range(snake_case ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCAmelCase = self.value_function(x.permute(0 , 2 , 1 ) , snake_case ).sample
_UpperCAmelCase = torch.autograd.grad([y.sum()] , [x] )[0]
_UpperCAmelCase = self.scheduler._get_variance(snake_case )
_UpperCAmelCase = torch.exp(0.5 * posterior_variance )
_UpperCAmelCase = model_std * grad
_UpperCAmelCase = 0
_UpperCAmelCase = x.detach()
_UpperCAmelCase = x + scale * grad
_UpperCAmelCase = self.reset_xa(snake_case , snake_case , self.action_dim )
_UpperCAmelCase = self.unet(x.permute(0 , 2 , 1 ) , snake_case ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_UpperCAmelCase = self.scheduler.step(snake_case , snake_case , snake_case , predict_epsilon=snake_case )['prev_sample']
# apply conditions to the trajectory (set the initial state)
_UpperCAmelCase = self.reset_xa(snake_case , snake_case , self.action_dim )
_UpperCAmelCase = self.to_torch(snake_case )
return x, y
def __call__( self , snake_case , snake_case=64 , snake_case=32 , snake_case=2 , snake_case=0.1 ) -> str:
# normalize the observations and create batch dimension
_UpperCAmelCase = self.normalize(snake_case , 'observations' )
_UpperCAmelCase = obs[None].repeat(snake_case , axis=0 )
_UpperCAmelCase = {0: self.to_torch(snake_case )}
_UpperCAmelCase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCAmelCase = randn_tensor(snake_case , device=self.unet.device )
_UpperCAmelCase = self.reset_xa(snake_case , snake_case , self.action_dim )
_UpperCAmelCase = self.to_torch(snake_case )
# run the diffusion process
_UpperCAmelCase , _UpperCAmelCase = self.run_diffusion(snake_case , snake_case , snake_case , snake_case )
# sort output trajectories by value
_UpperCAmelCase = y.argsort(0 , descending=snake_case ).squeeze()
_UpperCAmelCase = x[sorted_idx]
_UpperCAmelCase = sorted_values[:, :, : self.action_dim]
_UpperCAmelCase = actions.detach().cpu().numpy()
_UpperCAmelCase = self.de_normalize(snake_case , key='actions' )
# select the action with the highest value
if y is not None:
_UpperCAmelCase = 0
else:
# if we didn't run value guiding, select a random action
_UpperCAmelCase = np.random.randint(0 , snake_case )
_UpperCAmelCase = denorm_actions[selected_index, 0]
return denorm_actions
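# Minimal usage sketch (illustrative only; the environment name and checkpoint id
# below are assumptions, not taken from this file):
#
#   import gym
#   env = gym.make("hopper-medium-v2")
#   pipeline = ValueGuidedRLPipeline.from_pretrained(
#       "bglick13/hopper-medium-v2-value-function-hor32", env=env
#   )
#   obs = env.reset()
#   action = pipeline(obs, planning_horizon=32)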
| 573 |
"""simple docstring"""
def UpperCAmelCase ( A : int , A : int ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def UpperCAmelCase ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
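# Truth table realized by or_gate (for reference):
#   input_1  input_2  output
#      0        0       0
#      0        1       1
#      1        0       1
#      1        1       1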
| 573 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BigBird models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
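# Minimal usage sketch (checkpoint name taken from the pretrained map above;
# requires the `sentencepiece` package and hub or local cache access):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("The quick brown fox")["input_ids"]
#   text = tokenizer.decode(ids)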
| 472 |
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Patched module object that mirrors a module's attributes so they can be swapped out."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, usable as a context manager or via start()/stop()."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None

        return self.__exit__()
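# Illustrative use (the module `mod` and replacement callables are made up):
# temporarily reroute `os.path.join` as seen from `mod`, either as a context
# manager or with explicit start()/stop() calls:
#
#   with patch_submodule(mod, "os.path.join", custom_join):
#       ...  # code inside `mod` now sees custom_join
#
#   p = patch_submodule(mod, "open", custom_open)
#   p.start()
#   ...
#   p.stop()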
| 472 | 1 |
"""Tokenization classes for MVP."""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class A ( __UpperCAmelCase ):
lowerCamelCase : List[str] = VOCAB_FILES_NAMES
lowerCamelCase : Any = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Tuple = ["""input_ids""", """attention_mask"""]
lowerCamelCase : Dict = MvpTokenizer
    # NOTE: this method body relies on module-level imports that live earlier in
    # this file (json, AddedToken, BatchEncoding, logger, and the
    # `pre_tokenizers` / `processors` modules from `tokenizers`).
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
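    # Illustration of the two helpers above, with hypothetical ids
    # (bos/cls = 0, eos/sep = 2, and [5, 6] / [7] as the two sequences):
    #
    #     build_inputs_with_special_tokens([5, 6])          -> [0, 5, 6, 2]
    #     build_inputs_with_special_tokens([5, 6], [7])     -> [0, 5, 6, 2, 2, 7, 2]
    #     create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 0, 0, 0]
    #
    # Pairs are joined as `<s> A </s></s> B </s>`, and the token type ids are all
    # zeros because this tokenizer does not make use of token types.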
"""Open the top Google search results for a command-line query in the browser."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
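# Example invocation (requires the `requests`, `bs4`, and `fake_useragent`
# packages; the script filename is a placeholder):
#
#     python google_search.py best python tutorials
#
# Note that ".eZt8xd" targets a CSS class in Google's result markup, which
# Google changes from time to time, so the selector may need updating.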
"""simple docstring"""
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range, down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
def lowerCamelCase__ ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : str ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def lowerCamelCase__ ( self : List[str] ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def lowerCamelCase__ ( self : str ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def lowerCamelCase__ ( self : List[Any] ):
pass
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Tuple = model_class(UpperCAmelCase )
__lowerCamelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
__lowerCamelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def lowerCamelCase__ ( self : Tuple ):
def check_hidden_states_output(UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] ):
__lowerCamelCase : Optional[Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
__lowerCamelCase : List[str] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
__lowerCamelCase : str = outputs.hidden_states
__lowerCamelCase : List[Any] = len(self.model_tester.depths ) + 1
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
__lowerCamelCase : Dict = (self.model_tester.image_size, self.model_tester.image_size)
__lowerCamelCase , __lowerCamelCase : Tuple = image_size[0], image_size[1]
for _ in range(4 ):
__lowerCamelCase : List[str] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
__lowerCamelCase : Any = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCamelCase__ ( self : Optional[int] ):
pass
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : str=False ):
__lowerCamelCase : Tuple = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def lowerCamelCase__ ( self : Any ):
if not self.model_tester.is_training:
return
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : int = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
__lowerCamelCase : Optional[int] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
__lowerCamelCase : Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = model(**UpperCAmelCase ).loss
loss.backward()
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCamelCase : int = False
__lowerCamelCase : int = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
__lowerCamelCase : Optional[int] = model_class(UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase )
model.train()
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
__lowerCamelCase : Optional[int] = model(**UpperCAmelCase ).loss
loss.backward()
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
__lowerCamelCase : List[str] = problem_type["title"]
__lowerCamelCase : List[str] = problem_type["num_labels"]
__lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
__lowerCamelCase : Dict = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if problem_type["num_labels"] > 1:
__lowerCamelCase : Tuple = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
__lowerCamelCase : List[Any] = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCAmelCase ) as warning_list:
__lowerCamelCase : str = model(**UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowerCamelCase__ ( self : int ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict = LevitModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase_ ( ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Optional[int] ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = self.default_image_processor
__lowerCamelCase : Optional[int] = prepare_img()
__lowerCamelCase : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="pt" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
__lowerCamelCase : List[str] = model(**UpperCAmelCase )
# verify the logits
__lowerCamelCase : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
__lowerCamelCase : Optional[Any] = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCAmelCase, atol=1e-4))
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy the original generator weights into the Hugging Face model in place."""
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
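# Example invocation (all paths are placeholders):
#
#     python convert_hifigan.py \
#         --checkpoint_path ./generator.ckpt \
#         --stats_path ./stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan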
"""Tests for the Kandinsky 2.2 image-to-image pipeline."""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb,
            generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory,
            streaming=streaming, num_proc=num_proc, **kwargs,
        )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode,
                verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
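# Minimal usage sketch (assumes this module lives inside the `datasets`
# package, where the relative imports above resolve):
#
#     def gen():
#         yield {"text": "hello"}
#         yield {"text": "world"}
#
#     ds = GeneratorDatasetInputStream(generator=gen).read()
#     assert ds[0] == {"text": "hello"}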
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score an item by counting the genes that match the target at the same position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and combine the halves into two children."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Replace one random gene of the child with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Breed new children from a parent, proportionally to its fitness score."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]

        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
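# Worked example of the selection arithmetic in `select` (illustrative numbers):
# a parent with normalized score 0.23 requests int(0.23 * 100) + 1 = 24 children,
# which the cap reduces to 10 crossover rounds, i.e. 20 mutated strings appended.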
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4,
            out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero", norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
"""A stack implemented on top of a singly linked list."""
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
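    # Quick usage sketch (names as defined above):
    #
    #     stack: LinkedStack[int] = LinkedStack()
    #     stack.push(1)
    #     stack.push(2)
    #     assert str(stack) == "2->1"
    #     assert stack.peek() == 2
    #     assert stack.pop() == 2
    #     assert len(stack) == 1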
"""Image feature for encoding and decoding image data to and from Apache Arrow."""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__(self ) -> Optional[int]:
return self.pa_type
def lowercase (self , UpperCAmelCase ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_snake_case = np.array(UpperCAmelCase )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(UpperCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(UpperCAmelCase )
elif isinstance(UpperCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(UpperCAmelCase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def lowercase (self , UpperCAmelCase , UpperCAmelCase=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
_snake_case = {}
_snake_case, _snake_case = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(UpperCAmelCase ):
_snake_case = PIL.Image.open(UpperCAmelCase )
else:
_snake_case = path.split("""::""" )[-1]
try:
_snake_case = string_to_dict(UpperCAmelCase , config.HUB_DATASETS_URL )["""repo_id"""]
_snake_case = token_per_repo_id.get(UpperCAmelCase )
except ValueError:
_snake_case = None
with xopen(UpperCAmelCase , """rb""" , use_auth_token=UpperCAmelCase ) as f:
_snake_case = BytesIO(f.read() )
_snake_case = PIL.Image.open(bytes_ )
else:
_snake_case = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase (self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def lowercase (self , UpperCAmelCase ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
_snake_case = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
_snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_snake_case = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
_snake_case = storage.field("""bytes""" )
else:
_snake_case = pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
_snake_case = storage.field("""path""" )
else:
_snake_case = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_snake_case = pa.array(
[encode_np_array(np.array(UpperCAmelCase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
_snake_case = pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
_snake_case = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase , self.pa_type )
def lowercase (self , UpperCAmelCase ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(UpperCAmelCase ):
with xopen(UpperCAmelCase , """rb""" ) as f:
_snake_case = f.read()
return bytes_
_snake_case = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_snake_case = pa.array(
[os.path.basename(UpperCAmelCase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
_snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(UpperCAmelCase , self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
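# Example: pytest picks up the hooks above automatically when running the test
# suite from the repository root; `--make-reports` takes an arbitrary run id:
#
#     python -m pytest tests/ --make-reports=my_run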
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
def __init__( self : str , __snake_case : List[str] , __snake_case : Optional[int]=13 , __snake_case : Any=32 , __snake_case : Union[str, Any]=2 , __snake_case : List[str]=3 , __snake_case : List[str]=640 , __snake_case : Any=4 , __snake_case : Union[str, Any]="silu" , __snake_case : Union[str, Any]=3 , __snake_case : str=32 , __snake_case : Optional[int]=0.1 , __snake_case : Tuple=0.1 , __snake_case : Any=0.1 , __snake_case : Union[str, Any]=0.02 , __snake_case : str=True , __snake_case : Optional[int]=True , __snake_case : Tuple=10 , __snake_case : List[Any]=None , ) -> int:
_a : int = parent
_a : Any = batch_size
_a : Dict = image_size
_a : int = patch_size
_a : List[Any] = num_channels
_a : Dict = last_hidden_size
_a : Tuple = num_attention_heads
_a : List[str] = hidden_act
_a : Optional[Any] = conv_kernel_size
_a : Optional[Any] = output_stride
_a : int = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Optional[int] = classifier_dropout_prob
_a : List[str] = use_labels
_a : Union[str, Any] = is_training
_a : Optional[int] = num_labels
_a : Dict = initializer_range
_a : List[str] = scope
def snake_case_ ( self : Dict ) -> Union[str, Any]:
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = None
_a : List[Any] = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_a : int = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ ( self : Union[str, Any] ) -> Tuple:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ ( self : Optional[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : List[Any] ) -> Union[str, Any]:
_a : Optional[Any] = MobileViTModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_a : int = model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : Dict , __snake_case : List[str] , __snake_case : int , __snake_case : int , __snake_case : Optional[Any] ) -> str:
_a : Any = self.num_labels
_a : List[Any] = MobileViTForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
_a : Tuple = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Dict , __snake_case : Dict ) -> Optional[int]:
_a : List[Any] = self.num_labels
_a : Dict = MobileViTForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
_a : List[str] = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Any = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ ( self : int ) -> str:
_a : List[str] = self.prepare_config_and_inputs()
_a , _a , _a , _a : int = config_and_inputs
_a : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : str = MobileViTModelTester(self )
_a : List[Any] = MobileViTConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 249 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip() | 217 |
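
# Hedged usage sketch for the ImageCaptioningTool above: PipelineTool instances
# are callable and chain encode -> forward -> decode internally. "photo.jpg" is
# a hypothetical local file; the BLIP checkpoint is downloaded on first use.
#
#     from PIL import Image
#     tool = ImageCaptioningTool()
#     print(tool(Image.open("photo.jpg")))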
def different_signs(num_a: int, num_b: int) -> bool:
    """
    Return True if the two integers have opposite signs (the sign bit of their XOR is set).

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 219 | 0 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
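
# Hedged usage sketch: criteria like the ones tested above are passed to
# `generate` via its `stopping_criteria` argument (assumption: any causal LM
# checkpoint works; "gpt2" is used purely as an example).
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("gpt2")
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     inputs = tokenizer("Hello", return_tensors="pt")
#     criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#     model.generate(**inputs, stopping_criteria=criteria)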
| 135 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
"""simple docstring"""
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 135 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 518 |
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key(k) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir) | 518 | 1 |
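
# Hedged usage sketch for the conversion script above (the script filename is a
# placeholder; the checkpoint layout matches the default path used by
# get_tf_weights_as_numpy):
#
#     python convert_pegasus_checkpoint.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc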
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
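
# For illustration: the default `attention_types` above expands to 24 layer
# types, alternating global and local attention:
#
#     GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
#     # -> ["global", "local", "global", "local", ...]  (24 entries)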
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of the block-size computation, written with torch ops so it can be traced for ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
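
# For illustration (sketch, not part of the original module): `custom_unfold`
# mirrors `torch.Tensor.unfold` on a traceable path, e.g.
#
#     x = torch.arange(10).reshape(2, 5)
#     assert torch.equal(custom_unfold(x, 1, 2, 2), x.unfold(1, 2, 2))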
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
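
# Hedged export sketch: an OnnxConfig like the one above is what the
# `transformers.onnx` exporter instantiates under the hood, e.g.
#
#     python -m transformers.onnx --model=EleutherAI/gpt-neo-1.3B onnx/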
| 335 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"
    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 335 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 38 | # We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer=True, split_batches=False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
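
# Hedged usage sketch: user code normally never builds this wrapper directly;
# `Accelerator.prepare` wraps a torch scheduler automatically:
#
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
#     scheduler.step()  # only advances when the wrapped optimizer actually stepped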
| 520 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """Error for a given example: hypothesis value minus the actual output."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Compute the hypothesis value for a given input tuple."""
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    """Return the actual output value of the given example."""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    """Return the hypothesis value of the given example."""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    """Sum the error terms, weighted by the input feature at `index` (-1 means the bias term)."""
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    """Derivative of the cost with respect to the parameter at `index`."""
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent() | 195 |
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5)) | 195 | 1 |
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {dest_folder} to {utils_file}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 533 |
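
# Hedged usage sketch for the convert command defined above (assumption: the
# `datasets-cli` entry point installed with the `datasets` package is on PATH):
#
#     datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./converted/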
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)
if __name__ == "__main__":
main()
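
# Hedged launch sketch (assumptions: an accelerate config exists via
# `accelerate config`; the DeepSpeed branches only activate with a DeepSpeed
# config; "checkpointing_example.py" is a placeholder filename):
#
#     accelerate launch checkpointing_example.py --num_epochs 2 --output_dir ./ckpts
#     # resume later from a saved epoch folder:
#     accelerate launch checkpointing_example.py --resume_from_checkpoint ./ckpts/epoch_0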
| 533 | 1 |
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
'''simple docstring'''
A__ : Optional[Any] = ModelForTest()
A__ : Optional[int] = ModelHook()
add_hook_to_module(snake_case , snake_case )
self.assertEqual(test_model._hf_hook , snake_case )
self.assertTrue(hasattr(snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(snake_case , """_old_forward""" ) )
    def test_append_and_remove_hooks(self):
'''simple docstring'''
A__ : List[Any] = ModelForTest()
A__ : int = ModelHook()
add_hook_to_module(snake_case , snake_case )
add_hook_to_module(snake_case , snake_case , append=snake_case )
self.assertEqual(isinstance(test_model._hf_hook , snake_case ) , snake_case )
self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
self.assertTrue(hasattr(snake_case , """_old_forward""" ) )
# Check adding the hook did not change the name or the signature
self.assertEqual(test_model.forward.__name__ , """forward""" )
self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
remove_hook_from_module(snake_case )
self.assertFalse(hasattr(snake_case , """_hf_hook""" ) )
self.assertFalse(hasattr(snake_case , """_old_forward""" ) )
    def test_pre_forward_hook_is_executed(self):
'''simple docstring'''
A__ : List[Any] = ModelForTest()
A__ : Any = torch.randn(2 , 3 )
A__ : str = test_model(x + 1 )
A__ : str = test_model(x + 2 )
A__ : Any = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
A__ : List[str] = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A__ : Any = PreForwardHook()
add_hook_to_module(snake_case , snake_case )
A__ : Union[str, Any] = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
add_hook_to_module(snake_case , snake_case )
A__ : List[str] = test_model(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1e-5 )
    def test_post_forward_hook_is_executed(self):
'''simple docstring'''
A__ : Any = ModelForTest()
A__ : int = torch.randn(2 , 3 )
A__ : Dict = test_model(snake_case )
A__ : Optional[int] = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
A__ : Dict = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1e-5 ) )
# Attaching a hook to a model when it already has one replaces, does not chain
A__ : List[str] = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
A__ : int = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 , atol=1e-5 ) )
# You need to use the sequential hook to chain two or more hooks
A__ : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
add_hook_to_module(snake_case , snake_case )
A__ : Any = test_model(snake_case )
assert torch.allclose(snake_case , output + 2 , atol=1e-5 )
    def test_no_grad_in_hook(self):
'''simple docstring'''
A__ : Union[str, Any] = ModelForTest()
A__ : Any = torch.randn(2 , 3 )
A__ : Any = test_model(snake_case )
A__ : Optional[Any] = PostForwardHook()
add_hook_to_module(snake_case , snake_case )
A__ : str = test_model(snake_case )
self.assertTrue(torch.allclose(snake_case , output + 1 ) )
self.assertTrue(outputa.requires_grad )
A__ : Tuple = True
A__ : Any = test_model(snake_case )
self.assertFalse(outputa.requires_grad )
@require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
'''simple docstring'''
A__ : int = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
# We can still make a forward pass. The input does not need to be on any particular device
A__ : List[str] = torch.randn(2 , 3 )
A__ : Union[str, Any] = model(snake_case )
self.assertEqual(output.device , torch.device(1 ) )
# We can add a general hook to put back output on same device as input.
add_hook_to_module(snake_case , AlignDevicesHook(io_same_device=snake_case ) )
A__ : str = torch.randn(2 , 3 ).to(0 )
A__ : Any = model(snake_case )
self.assertEqual(output.device , torch.device(0 ) )
    def test_align_devices_as_cpu_offload(self):
'''simple docstring'''
A__ : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A__ : Any = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ : Any = torch.device(hook_kwargs["""execution_device"""] )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
A__ : Optional[int] = torch.randn(2 , 3 )
A__ : Union[str, Any] = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case ) )
add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case ) )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A__ : str = torch.randn(2 , 3 )
A__ : Optional[Any] = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_module(model.lineara )
remove_hook_from_module(model.batchnorm )
remove_hook_from_module(model.lineara )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    def test_attach_align_device_hook_as_cpu_offload(self):
'''simple docstring'''
A__ : Any = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A__ : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ : Tuple = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
A__ : int = torch.randn(2 , 3 )
A__ : Dict = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(snake_case , execution_device=snake_case , offload=snake_case , offload_buffers=snake_case )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A__ : Optional[int] = torch.randn(2 , 3 )
A__ : Optional[Any] = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
'''simple docstring'''
A__ : List[Any] = ModelForTest()
# Everything is on CPU
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# This will move each submodule on different devices
A__ : str = 0 if torch.cuda.is_available() else """cpu"""
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() )
# Parameters have been offloaded, so on the meta device
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
# Buffers are not included in the offload by default, so are on the execution device
A__ : Union[str, Any] = torch.device(snake_case )
self.assertEqual(model.batchnorm.running_mean.device , snake_case )
A__ : str = torch.randn(2 , 3 )
A__ : List[Any] = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
# Now test with buffers included in the offload
attach_align_device_hook(
snake_case , execution_device=snake_case , offload=snake_case , weights_map=model.state_dict() , offload_buffers=snake_case , )
# Parameters have been offloaded, so on the meta device, buffers included
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
A__ : int = torch.randn(2 , 3 )
A__ : Dict = model(snake_case )
self.assertEqual(output.device , snake_case )
# Removing hooks loads back the weights in the model.
remove_hook_from_submodules(snake_case )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
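
# A small demo (added for illustration, not one of the original tests) of the
# hook mechanics exercised above: chaining a PreForwardHook and a
# PostForwardHook with SequentialHook shifts both the input and the output of
# the wrapped module by one.
def _sequential_hook_demo():
    model = ModelForTest()
    x = torch.randn(2, 3)
    add_hook_to_module(model, SequentialHook(PreForwardHook(), PostForwardHook()))
    shifted = model(x)  # forward runs on x + 1, then the raw output is incremented by 1
    remove_hook_from_module(model)  # restores the original forward
    return shifted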
| 498 |
"""simple docstring"""
from timeit import timeit
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
A__ : Optional[int] = 0
while number:
number &= number - 1
result += 1
return result
def _lowerCAmelCase ( UpperCAmelCase__ : int ) ->int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
A__ : Tuple = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def _lowerCAmelCase ( ) ->None:
def do_benchmark(UpperCAmelCase__ : int ) -> None:
A__ : Optional[int] = """import __main__ as z"""
print(f'Benchmark when {number = }:' )
print(f'{get_set_bits_count_using_modulo_operator(UpperCAmelCase__ ) = }' )
A__ : Any = timeit("""z.get_set_bits_count_using_modulo_operator(25)""", setup=UpperCAmelCase__ )
print(f'timeit() runs in {timing} seconds' )
print(f'{get_set_bits_count_using_brian_kernighans_algorithm(UpperCAmelCase__ ) = }' )
A__ : List[str] = timeit(
"""z.get_set_bits_count_using_brian_kernighans_algorithm(25)""", setup=UpperCAmelCase__, )
print(f'timeit() runs in {timing} seconds' )
for number in (2_5, 3_7, 5_8, 0):
do_benchmark(UpperCAmelCase__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
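
# Worked trace (added for illustration): 25 = 0b11001 has three set bits, and
# Kernighan's update `number &= number - 1` clears the lowest set bit per
# iteration, so it loops once per set bit rather than once per bit position:
#   0b11001 & 0b11000 -> 0b11000
#   0b11000 & 0b10111 -> 0b10000
#   0b10000 & 0b01111 -> 0b00000   (3 iterations)
assert get_set_bits_count_using_brian_kernighans_algorithm(25) == 3
assert get_set_bits_count_using_modulo_operator(25) == bin(25).count("1")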
| 498 | 1 |
import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
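
# A minimal sketch (added for illustration; not part of the original conftest)
# of what the autouse cache fixture above buys every test: the HF cache paths
# point into pytest's temporary base directory instead of the user's real cache.
def test_cache_config_is_patched_example():
    import datasets.config

    assert datasets.config.HF_DATASETS_CACHE.endswith("datasets")  # .../cache/datasets under basetemp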
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
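
# Design note (added): the _LazyModule indirection above keeps
# `import transformers.models.chinese_clip` cheap; the torch- and vision-heavy
# submodules are only imported when a name is first accessed, e.g.
#   from transformers.models.chinese_clip import ChineseCLIPConfig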
| 236 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
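
# Fragility note (added): "gs_ri" and "gs_fl" are unversioned Google Scholar
# CSS classes, so `soup.find` can return None when the page layout changes; a
# defensive caller could guard the lookup, e.g.
#   div = soup.find("div", attrs={"class": "gs_ri"})
#   if div is None:
#       raise RuntimeError("no result block found; Scholar markup may have changed")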
| 699 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , __UpperCamelCase , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase = None , **__UpperCamelCase , ) -> None:
'''simple docstring'''
snake_case__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case__ : Tuple = 7
snake_case__ : Dict = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case__ : Union[str, Any] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
snake_case__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
snake_case__ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case__ : Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case__ : Tuple = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case__ : List[Any] = len(self.sp_model )
snake_case__ : Optional[Any] = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
snake_case__ : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.__dict__.copy()
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case__ : Any = {}
snake_case__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case__ : str = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def __a ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : int = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case__ : Optional[Any] = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __a ( self , __UpperCamelCase ) -> Dict:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
snake_case__ : Any = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
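
# Usage sketch (added, illustrative): the fairseq-style +1 id offset handled
# above is invisible to callers of the public API:
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tokenizer("Hello world").input_ids
#   text = tokenizer.decode(ids, skip_special_tokens=True)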
| 699 | 1 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
def __init__( self : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : List[Any]=14 , lowerCamelCase : Dict=7 , lowerCamelCase : str=True , lowerCamelCase : Any=True , lowerCamelCase : str=True , lowerCamelCase : List[Any]=99 , lowerCamelCase : Tuple=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Tuple=37 , lowerCamelCase : str="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : str=512 , lowerCamelCase : List[str]=0.02 , ) -> str:
__snake_case : str = parent
__snake_case : Any = batch_size
__snake_case : Optional[int] = seq_length
__snake_case : Union[str, Any] = is_training
__snake_case : Optional[Any] = use_input_mask
__snake_case : str = use_labels
__snake_case : List[Any] = vocab_size
__snake_case : Union[str, Any] = d_model
__snake_case : Tuple = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : List[Any] = ffn_dim
__snake_case : List[Any] = activation_function
__snake_case : Dict = activation_dropout
__snake_case : str = attention_dropout
__snake_case : Tuple = max_position_embeddings
__snake_case : str = initializer_range
__snake_case : Optional[Any] = None
__snake_case : List[str] = 0
__snake_case : Tuple = 2
__snake_case : Any = 1
def __snake_case ( self : Optional[Any] ) -> int:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def __snake_case ( self : Union[str, Any] ) -> Tuple:
__snake_case : str = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__snake_case : Dict = None
if self.use_input_mask:
__snake_case : str = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : Optional[Any] = self.get_config()
__snake_case : List[str] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __snake_case ( self : Any ) -> Optional[Any]:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCamelCase , )
def __snake_case ( self : List[Any] ) -> Any:
__snake_case : Optional[int] = self.prepare_config_and_inputs()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : int = config_and_inputs
__snake_case : List[Any] = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
def __snake_case ( self : Dict ) -> Any:
__snake_case : Optional[int] = TFXGLMModelTester(self )
__snake_case : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase , n_embd=37 )
def __snake_case ( self : str ) -> List[Any]:
self.config_tester.run_common_tests()
@slow
def __snake_case ( self : int ) -> List[str]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = TFXGLMModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def __snake_case ( self : Dict ) -> Tuple:
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def __snake_case ( self : Union[str, Any] , lowerCamelCase : List[Any]=True ) -> List[Any]:
__snake_case : Union[str, Any] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
__snake_case : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__snake_case : Dict = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__snake_case : Optional[int] = model.generate(lowerCamelCase , do_sample=lowerCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCamelCase )
@slow
def __snake_case ( self : List[Any] ) -> Tuple:
__snake_case : Dict = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
__snake_case : List[str] = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
__snake_case : str = tokenizer("Today is a nice day and" , return_tensors="tf" )
__snake_case : List[str] = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
__snake_case : Optional[int] = model.generate(lowerCamelCase , do_sample=lowerCamelCase , seed=[7, 0] )
__snake_case : Dict = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase )
__snake_case : Tuple = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(lowerCamelCase , lowerCamelCase )
@slow
def __snake_case ( self : Union[str, Any] ) -> Dict:
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
# use different length sentences to test batching
__snake_case : List[Any] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
__snake_case : int = tokenizer(lowerCamelCase , return_tensors="tf" , padding=lowerCamelCase )
__snake_case : Optional[int] = inputs["input_ids"]
__snake_case : int = model.generate(input_ids=lowerCamelCase , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
__snake_case : Optional[Any] = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
__snake_case : int = model.generate(input_ids=lowerCamelCase , max_new_tokens=12 )
__snake_case : List[str] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
__snake_case : Tuple = model.generate(input_ids=lowerCamelCase , max_new_tokens=12 )
__snake_case : Optional[Any] = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
__snake_case : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase )
__snake_case : str = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase )
__snake_case : List[Any] = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , [non_padded_sentence, padded_sentence] )
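
# Note (added for clarity): decoder-only models such as XGLM must be padded on
# the left for batched generation; with right padding the model would condition
# on pad tokens sitting between the prompt and the newly generated tokens. That
# is what `tokenizer.padding_side = "left"` above guards against, and the test
# checks that padded and unpadded prompts generate the same continuations.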
| 81 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
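
# Instantiation sketch (added, illustrative):
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.attention_type == "prob"        # ProbSparse attention by default
#   assert config.hidden_size == config.d_model   # via attribute_map above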
| 489 | 0 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = 0.0_00_85 , SCREAMING_SNAKE_CASE_ = 0.0_12 , SCREAMING_SNAKE_CASE_ = "linear" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "epsilon" , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = "linspace" , SCREAMING_SNAKE_CASE_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
SCREAMING_SNAKE_CASE_ = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE_ = torch.linspace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE_ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
SCREAMING_SNAKE_CASE_ = betas_for_alpha_bar(SCREAMING_SNAKE_CASE_ , alpha_transform_type='''exp''' )
else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
SCREAMING_SNAKE_CASE_ = 1.0 - self.betas
SCREAMING_SNAKE_CASE_ = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = use_karras_sigmas
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
"""simple docstring"""
if schedule_timesteps is None:
SCREAMING_SNAKE_CASE_ = self.timesteps
SCREAMING_SNAKE_CASE_ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
SCREAMING_SNAKE_CASE_ = 1 if len(SCREAMING_SNAKE_CASE_ ) > 1 else 0
else:
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
SCREAMING_SNAKE_CASE_ = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowercase (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = num_inference_steps
SCREAMING_SNAKE_CASE_ = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
SCREAMING_SNAKE_CASE_ = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
SCREAMING_SNAKE_CASE_ = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(0 , SCREAMING_SNAKE_CASE_ ) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
SCREAMING_SNAKE_CASE_ = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE_ = (np.arange(SCREAMING_SNAKE_CASE_ , 0 , -step_ratio )).round().copy().astype(SCREAMING_SNAKE_CASE_ )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
SCREAMING_SNAKE_CASE_ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = np.interp(SCREAMING_SNAKE_CASE_ , np.arange(0 , len(SCREAMING_SNAKE_CASE_ ) ) , SCREAMING_SNAKE_CASE_ )
if self.config.use_karras_sigmas:
SCREAMING_SNAKE_CASE_ = self._convert_to_karras(in_sigmas=SCREAMING_SNAKE_CASE_ , num_inference_steps=self.num_inference_steps )
SCREAMING_SNAKE_CASE_ = np.array([self._sigma_to_t(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for sigma in sigmas] )
SCREAMING_SNAKE_CASE_ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = timesteps.to(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = timesteps.to(device=SCREAMING_SNAKE_CASE_ )
# empty dt and derivative
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
SCREAMING_SNAKE_CASE_ = defaultdict(SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = np.log(SCREAMING_SNAKE_CASE_ )
# get distribution
SCREAMING_SNAKE_CASE_ = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
SCREAMING_SNAKE_CASE_ = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
SCREAMING_SNAKE_CASE_ = low_idx + 1
SCREAMING_SNAKE_CASE_ = log_sigmas[low_idx]
SCREAMING_SNAKE_CASE_ = log_sigmas[high_idx]
# interpolate sigmas
SCREAMING_SNAKE_CASE_ = (low - log_sigma) / (low - high)
SCREAMING_SNAKE_CASE_ = np.clip(SCREAMING_SNAKE_CASE_ , 0 , 1 )
# transform interpolation to time range
SCREAMING_SNAKE_CASE_ = (1 - w) * low_idx + w * high_idx
SCREAMING_SNAKE_CASE_ = t.reshape(sigma.shape )
return t
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = in_sigmas[-1].item()
SCREAMING_SNAKE_CASE_ = in_sigmas[0].item()
SCREAMING_SNAKE_CASE_ = 7.0 # 7.0 is the value used in the paper
SCREAMING_SNAKE_CASE_ = np.linspace(0 , 1 , SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = sigma_min ** (1 / rho)
SCREAMING_SNAKE_CASE_ = sigma_max ** (1 / rho)
SCREAMING_SNAKE_CASE_ = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowercase (self ):
"""simple docstring"""
return self.dt is None
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.index_for_timestep(SCREAMING_SNAKE_CASE_ )
# advance index counter by 1
SCREAMING_SNAKE_CASE_ = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index - 1]
SCREAMING_SNAKE_CASE_ = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE_ = sigma_hat if self.state_in_first_order else sigma_next
SCREAMING_SNAKE_CASE_ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE_ = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.config.clip_sample:
SCREAMING_SNAKE_CASE_ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
SCREAMING_SNAKE_CASE_ = sigma_next - sigma_hat
# store for 2nd order step
SCREAMING_SNAKE_CASE_ = derivative
SCREAMING_SNAKE_CASE_ = dt
SCREAMING_SNAKE_CASE_ = sample
else:
# 2. 2nd order / Heun's method
SCREAMING_SNAKE_CASE_ = (sample - pred_original_sample) / sigma_next
SCREAMING_SNAKE_CASE_ = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
SCREAMING_SNAKE_CASE_ = self.dt
SCREAMING_SNAKE_CASE_ = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE_ ):
# mps does not support float64
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device , dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
SCREAMING_SNAKE_CASE_ = self.timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = timesteps.to(original_samples.device )
SCREAMING_SNAKE_CASE_ = [self.index_for_timestep(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for t in timesteps]
SCREAMING_SNAKE_CASE_ = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
SCREAMING_SNAKE_CASE_ = sigma.unsqueeze(-1 )
SCREAMING_SNAKE_CASE_ = original_samples + noise * sigma
return noisy_samples
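
    # Usage sketch (added, illustrative): Heun is a second-order, two-evaluation
    # method, which is why set_timesteps interleaves duplicated sigmas/timesteps
    # above and why step() alternates between a first-order Euler step and its
    # correction:
    #   scheduler.set_timesteps(25)
    #   for t in scheduler.timesteps:
    #       model_out = unet(scheduler.scale_model_input(sample, t), t).sample
    #       sample = scheduler.step(model_out, t, sample).prev_sample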
def __len__(self ):
"""simple docstring"""
        return self.config.num_train_timesteps
| 628 |
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCAmelCase__ = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase__ = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]
def __init__(self , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 2_56 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.02 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = 20.0 , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ):
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
SCREAMING_SNAKE_CASE_ = SwinConfig(
image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = backbone_config.pop('''model_type''' )
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
f'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
SCREAMING_SNAKE_CASE_ = DetrConfig()
else:
# verify that the decoder is supported
SCREAMING_SNAKE_CASE_ = (
decoder_config.pop('''model_type''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'Transformer Decoder {decoder_type} not supported, please use one of'
f' {",".join(self.decoders_supported )}' )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ = CONFIG_MAPPING[decoder_type]
SCREAMING_SNAKE_CASE_ = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = backbone_config
SCREAMING_SNAKE_CASE_ = decoder_config
# main feature dimension for the model
SCREAMING_SNAKE_CASE_ = fpn_feature_size
SCREAMING_SNAKE_CASE_ = mask_feature_size
# initializer
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = init_xavier_std
# Hungarian matcher && loss
SCREAMING_SNAKE_CASE_ = cross_entropy_weight
SCREAMING_SNAKE_CASE_ = dice_weight
SCREAMING_SNAKE_CASE_ = mask_weight
SCREAMING_SNAKE_CASE_ = use_auxiliary_loss
SCREAMING_SNAKE_CASE_ = no_object_weight
SCREAMING_SNAKE_CASE_ = output_auxiliary_logits
SCREAMING_SNAKE_CASE_ = self.decoder_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ = self.decoder_config.num_hidden_layers
super().__init__(**SCREAMING_SNAKE_CASE_ )
@classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
        """Instantiate a MaskFormerConfig from a backbone config and a (DETR) decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )
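
    # Composition sketch (added, illustrative): the classmethod above lets a
    # caller pair any supported backbone with a DETR decoder, e.g.
    #   config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())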
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE_ = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.decoder_config.to_dict()
SCREAMING_SNAKE_CASE_ = self.__class__.model_type
        return output
| 628 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ = model(_snake_case )["last_hidden_state"]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _snake_case )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 159 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 159 | 1 |
from itertools import count
def solution( A : int = 50 ):
"""simple docstring"""
    fill_count_functions = [1] * A
for n in count(A ):
fill_count_functions.append(1 )
for block_length in range(A , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(f'''{solution() = }''')
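    # A readable sketch of the same fill-count recurrence as `solution` above
    # (the Project Euler 114/115 setting); `fill_count` and its argument names
    # are illustrative, not taken from this file.
    def fill_count(min_block_length: int, row_length: int) -> int:
        ways = [1] * min_block_length  # a row shorter than one block can only stay empty
        for n in range(min_block_length, row_length + 1):
            total = 1  # the all-empty row
            for block_len in range(min_block_length, n + 1):
                total += 1  # first block flush against the right edge
                for start in range(n - block_len):
                    # first block, one separator cell, then a shorter sub-row
                    total += ways[n - start - block_len - 1]
            ways.append(total)
        return ways[row_length]

    assert fill_count(3, 7) == 17  # the worked example from the Project Euler 114 statement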
| 701 |
import torch
def lowerCamelCase_ ( ):
"""simple docstring"""
if torch.cuda.is_available():
lowerCAmelCase_ = torch.cuda.device_count()
else:
lowerCAmelCase_ = 0
print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 413 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean( input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )


def similarity_search( dataset: np.ndarray , value_array: np.ndarray ) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
        )
        raise ValueError(msg )

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
        )
        raise TypeError(msg )

    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )

    return answer


def cosine_similarity( input_a: np.ndarray , input_b: np.ndarray ) -> float:
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
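    # Quick sanity checks for the helpers above; the expected values follow
    # directly from the underlying formulas.
    assert euclidean(np.array([0, 0] ) , np.array([3, 4] ) ) == 5.0  # classic 3-4-5 triangle
    assert abs(cosine_similarity(np.array([1.0, 2.0] ) , np.array([2.0, 4.0] ) ) - 1.0 ) < 1e-9  # parallel vectors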
| 413 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative() -> None:
    negative_img = cn.convert_to_negative(img )
    # assert that at least one pixel of the negative image is truthy
    assert negative_img.any()


def test_change_contrast() -> None:
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )


def test_gen_gaussian_kernel() -> None:
    resp = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # assert that every entry of the kernel is non-zero
    assert resp.all()


def test_canny() -> None:
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert that the grayscale input has no zero pixels
    assert canny_img.all()
    canny_array = canny.canny(canny_img )
    # assert that at least one edge pixel was detected
    assert canny_array.any()


def test_gen_gaussian_kernel_filter() -> None:
    assert gg.gaussian_filter(gray , 5 , sigma=0.9 ).all()


def test_convolve_filter() -> None:
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
    assert res.any()


def test_median_filter() -> None:
    assert med.median_filter(gray , 3 ).any()


def test_sobel_filter() -> None:
    grad, theta = sob.sobel_filter(gray )
    assert grad.any() and theta.any()


def test_sepia() -> None:
    sepia = sp.make_sepia(img , 20 )
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg" ) -> None:
    burkes = bs.Burkes(imread(file_path , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg" , ) -> None:
    nn = rs.NearestNeighbour(imread(file_path , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern() -> None:
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path , 0 )

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image , x_coordinate , y_coordinate , center )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]) )

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            lbp_image[i][j] = lbp.local_binary_value(image , i , j )

    assert lbp_image.any()
| 413 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def _lowercase ( ) -> Any:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('GET' ,'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' ,'https://huggingface.co' ,timeout=1.0 )
@pytest.mark.integration
def _lowercase ( ) -> Optional[Any]:
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' ,'https://huggingface.co' )
def _lowercase ( ) -> int:
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('https://huggingface.co' )
| 713 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__UpperCamelCase ):
__magic_name__ = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__magic_name__ = FlaxAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__UpperCamelCase ):
__magic_name__ = AutoConfig.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
__magic_name__ = FlaxAutoModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__magic_name__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
__magic_name__ = FlaxBertModel.from_pretrained(__UpperCamelCase )
__magic_name__ = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__UpperCamelCase: Optional[Any] ):
return model(**__UpperCamelCase )
eval(**__UpperCamelCase ).block_until_ready()
@slow
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
for model_name in ["roberta-base", "roberta-large"]:
__magic_name__ = AutoTokenizer.from_pretrained(__UpperCamelCase )
__magic_name__ = FlaxRobertaModel.from_pretrained(__UpperCamelCase )
__magic_name__ = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__UpperCamelCase: Any ):
return model(**__UpperCamelCase )
eval(**__UpperCamelCase ).block_until_ready()
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , 'bert-base is not a local folder and is not a valid model identifier' ):
__magic_name__ = FlaxAutoModel.from_pretrained('bert-base' )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            model = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision='aaaaaa' )
def _SCREAMING_SNAKE_CASE ( self: str ):
'''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
            model = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def _SCREAMING_SNAKE_CASE ( self: int ):
'''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , 'Use `from_pt=True` to load this model' ):
            model = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 184 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
A_ : Union[str, Any] = {
"susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
"susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}
class lowerCamelCase (PretrainedConfig ):
    model_type: str = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self , vocab_size: int = 250_002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3_072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
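# Hedged usage sketch for the config above; the class name is this file's
# obfuscated one, and the small sizes are illustrative.
config = lowerCamelCase(hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
assert config.hidden_size == 64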
| 196 |
"""simple docstring"""
A_ : Any = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 196 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __A ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__( self , vocab_size=50_277 , context_length=1_024 , hidden_size=4_096 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1e-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
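# Hedged sanity check of the fallback logic above: when attention_hidden_size is
# left unset, it inherits hidden_size (the class name is this file's obfuscated one).
config = __A(hidden_size=512, num_hidden_layers=2)
assert config.attention_hidden_size == 512
assert config.intermediate_size == 4 * 512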
| 707 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase_ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
@is_staging_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCAmelCase ( cls : List[str] ) -> str:
"""simple docstring"""
try:
delete_repo(token=cls._token ,repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('''test-config''' ,use_auth_token=self._token )

        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )

        # Reset repo
        delete_repo(token=self._token ,repo_id='''test-config''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir ,repo_id='''test-config''' ,push_to_hub=True ,use_auth_token=self._token )

        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('''valid_org/test-config-org''' ,use_auth_token=self._token )

        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )

        # Reset repo
        delete_repo(token=self._token ,repo_id='''valid_org/test-config-org''' )

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir ,repo_id='''valid_org/test-config-org''' ,push_to_hub=True ,use_auth_token=self._token )

        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" ,trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'''CustomConfig''' )
self.assertEqual(new_config.attribute ,42 )
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd ,c.n_embd ,'''mismatch for key: n_embd''' )
        self.assertEqual(resid_pdrop ,c.resid_pdrop ,'''mismatch for key: resid_pdrop''' )
        self.assertEqual(scale_attn_weights ,c.scale_attn_weights ,'''mismatch for key: scale_attn_weights''' )
        self.assertEqual(summary_type ,c.summary_type ,'''mismatch for key: summary_type''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys ,['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config ,key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                f""" {", ".join(keys_with_defaults )}.""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )

        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ,subfolder='''bert''' )
        self.assertIsNotNone(config )
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' ,return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This check verifies that the fake HEAD request was actually called
            mock_head.assert_called()
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
        configuration = AutoConfig.from_pretrained('''bert-base-cased''' )
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() ,open(os.path.join(tmp_dir ,'''config.4.0.0.json''' ) ,'''w''' ) )

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,2 )

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir ,'''config.4.0.0.json''' ) ,os.path.join(tmp_dir ,'''config.42.0.0.json''' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,768 )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
        repo = '''hf-internal-testing/test-two-configs'''

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo ,return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs ,{} )

        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
        self.assertEqual(old_configuration.hidden_size ,768 )
self.assertEqual(old_configuration.hidden_size ,768 )
| 122 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class lowerCamelCase__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = 'switch_transformers'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}

    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us how often (every how many layers) a sparse encoder layer occurs.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how often (every how many layers) a sparse decoder layer occurs.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
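# Hedged sketch of the sparse-step arithmetic above: with the defaults (12 layers,
# 3 sparse encoder layers) every 4th encoder layer is a sparse one.
config = lowerCamelCase__()
assert config.encoder_sparse_step == 12 // 3
assert config.decoder_sparse_step == 12 // 3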
| 551 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 551 | 1 |
"""simple docstring"""
def lowerCAmelCase (number : int , iterations : int ):
    """simple docstring"""
    if not isinstance(iterations , int ):
        raise ValueError('''iterations must be defined as integers''' )
    if not isinstance(number , int ) or not number >= 1:
        raise ValueError('''starting number must be an integer and be more than 0''' )
    if not iterations >= 1:
        raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )

    out = ''''''
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number )
        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
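    # Quick spot check of the FizzBuzz helper above.
    assert lowerCAmelCase(1, 5) == "1 2 Fizz 4 Buzz "  # note the trailing separator space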
| 296 | """simple docstring"""
def solution(__UpperCamelCase : int = 1_0_0_0_0_0_0 ):
    """simple docstring"""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2 , __UpperCamelCase ):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
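    # Hedged sanity check: an illustrative chain-length helper (not part of this
    # file) confirming the Collatz sequence the memoised search above counts.
    def chain_length(n: int) -> int:
        count = 1
        while n != 1:
            n = n // 2 if n % 2 == 0 else 3 * n + 1
            count += 1
        return count

    assert chain_length(13) == 10  # 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1
    # For the default bound of one million, solution() returns 837799, the
    # published Project Euler 14 answer.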
| 296 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 370 | '''simple docstring'''
import os
from distutils.util import strtobool
def get_int_from_env( env_keys , default ):
    '''simple docstring'''
    for e in env_keys:
        val = int(os.environ.get(e , -1 ) )
        if val >= 0:
            return val
    return default


def parse_flag_from_env( key , default=False ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return strtobool(value ) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env( key , default="no" ):
    '''simple docstring'''
    value = os.environ.get(key , str(default ) )
    return value
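if __name__ == "__main__":
    # Hedged usage sketch; the environment variable names below are illustrative.
    os.environ["MY_DEBUG_FLAG"] = "1"
    assert parse_flag_from_env("MY_DEBUG_FLAG" ) is True
    assert get_int_from_env(["MISSING_A", "MISSING_B"] , default=7 ) == 7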
| 370 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def a ( snake_case__: List[Any]=32 , snake_case__: Tuple=10 , snake_case__: Optional[Any]=100 , snake_case__: List[Any]=1_026 , snake_case__: int=True , snake_case__: Tuple="data/tokenized_stories_train_wikitext103.jbl" , snake_case__: Dict="igf_context_pairs.jbl" , ):
'''simple docstring'''
set_seed(3 )
# generate train_data and objective_set
lowercase_ , lowercase_ = generate_datasets(
snake_case__ , snake_case__ , number=snake_case__ , min_len=1_026 , trim=snake_case__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
lowercase_ = load_gpta('''gpt2''' ).to(snake_case__ )
print('''computing perplexity on objective set''' )
lowercase_ = compute_perplexity(snake_case__ , snake_case__ , snake_case__ ).item()
print('''perplexity on objective set:''' , snake_case__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def a ( snake_case__: Dict , snake_case__: Optional[int]=15 , snake_case__: Tuple=128 , snake_case__: List[Any]=100 , snake_case__: Union[str, Any]="igf_model.pt" , ):
'''simple docstring'''
set_seed(42 )
# Load pre-trained model
lowercase_ = GPTaLMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
lowercase_ = SecondaryLearner(snake_case__ )
# Train secondary learner
lowercase_ = train_secondary_learner(
snake_case__ , snake_case__ , max_epochs=snake_case__ , batch_size=snake_case__ , eval_freq=100 , igf_model_path=snake_case__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def a ( snake_case__: int , snake_case__: Dict , snake_case__: Optional[Any] , snake_case__: str=32 , snake_case__: List[Any]=1_000 , snake_case__: Any=16 , snake_case__: Optional[Any]=1.0 , snake_case__: int=recopy_gpta , snake_case__: List[Any]=None , snake_case__: int=10 , snake_case__: List[Any]="gpt2_finetuned.pt" , ):
'''simple docstring'''
lowercase_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
lowercase_ = RandomSampler(snake_case__ )
lowercase_ = DataLoader(snake_case__ , sampler=snake_case__ )
lowercase_ = max_steps // (len(snake_case__ )) + 1
lowercase_ = 0
lowercase_ = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case__ )
lowercase_ , lowercase_ , lowercase_ = recopy_model(snake_case__ , snake_case__ , snake_case__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case__ )
secondary_learner.eval()
lowercase_ = []
lowercase_ = 0
lowercase_ = []
lowercase_ = []
# Compute the performance of the transformer model at the beginning
lowercase_ = compute_perplexity(snake_case__ , snake_case__ , snake_case__ )
test_perps.append(snake_case__ )
print('''Test perplexity, step''' , snake_case__ , ''':''' , snake_case__ )
for epoch in range(int(snake_case__ ) ):
for step, example in enumerate(snake_case__ ):
torch.cuda.empty_cache()
lowercase_ = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase_ = model(snake_case__ , labels=snake_case__ )
lowercase_ = True
if secondary_learner is not None:
lowercase_ = secondary_learner.forward(
torch.tensor(snake_case__ , dtype=torch.long , device=snake_case__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase_ = -1
if predicted_q < threshold:
lowercase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase_ = compute_perplexity(snake_case__ , snake_case__ , snake_case__ )
test_perps.append(snake_case__ )
print('''Test perplexity, step''' , snake_case__ , ''':''' , snake_case__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def a ( ):
'''simple docstring'''
lowercase_ = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=snake_case__ , default=snake_case__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=snake_case__ , default=snake_case__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=snake_case__ , type=snake_case__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=snake_case__ , default=snake_case__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=32 , type=snake_case__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=100 , type=snake_case__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=100 , type=snake_case__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_000 , type=snake_case__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=128 , type=snake_case__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=16 , type=snake_case__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=10 , type=snake_case__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=100 , type=snake_case__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_026 , type=snake_case__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=15 , type=snake_case__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=snake_case__ , type=snake_case__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=snake_case__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=snake_case__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=snake_case__ , type=snake_case__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=snake_case__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
lowercase_ = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
lowercase_ = training_secondary_learner(
snake_case__ , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
lowercase_ = GPTaLMHeadModel.from_pretrained('''gpt2''' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase_ , lowercase_ = generate_datasets(
context_len=32 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=100 , min_len=1_026 , trim=snake_case__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case__ , snake_case__ , snake_case__ , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=snake_case__ , secondary_learner=snake_case__ , eval_interval=10 , finetuned_model_name='''gpt2_finetuned.pt''' , )
if __name__ == "__main__":
main()
| 409 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 409 | 1 |
"""simple docstring"""
def snake_case ( a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )

    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) ,len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) ,b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
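    # Two spot checks for the bitwise-AND helper above (the obfuscated function
    # name in this file really is `snake_case`).
    assert snake_case(25, 32) == "0b000000"  # 25 & 32 == 0
    assert snake_case(37, 50) == "0b100000"  # 37 & 50 == 32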
| 95 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
__UpperCamelCase : Optional[Any] = 6_378_137.0
__UpperCamelCase : Any = 6_356_752.314_245
__UpperCamelCase : Optional[int] = 6378137
def _UpperCAmelCase ( lat1: float , lon1: float , lat2: float , lon2: float ):
    """simple docstring"""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_demonimator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_demonimator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
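    # Rough usage sketch: two points one degree of latitude apart on the prime
    # meridian. The exact figure depends on the haversine helper imported above,
    # but it should land close to 111 km.
    distance_m = _UpperCAmelCase(0.0 , 0.0 , 1.0 , 0.0 )
    assert 1.0e5 < distance_m < 1.2e5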
| 519 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE_ ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"question": Value("string" ), "context": Value("string" )} )
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string" ),
                    "answer_start": Value("int32" ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping( self ) -> Dict[str, str]:
        """simple docstring"""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
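# Hedged usage sketch: remapping dataset columns onto the canonical QA schema
# (the class name is this file's obfuscated one; "q"/"ctx"/"ans" are illustrative).
template = SCREAMING_SNAKE_CASE_(question_column="q", context_column="ctx", answers_column="ans")
assert template.column_mapping == {"q": "question", "ctx": "context", "ans": "answers"}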
| 534 |
def SCREAMING_SNAKE_CASE__ ( input_num: int ):
    if not isinstance(input_num , int ):
        raise ValueError('Input must be an integer' )
    if input_num <= 0:
        raise ValueError('Input must be positive' )
    return sum(
        divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
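    # Spot check: 6 is a perfect number, so its proper divisors sum back to itself.
    assert SCREAMING_SNAKE_CASE__(6 ) == 1 + 2 + 3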
| 534 | 1 |
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def test_component( self ) -> None:
        x = Vector([1, 2, 3] )
        self.assertEqual(x.component(0 ) , 1 )
        self.assertEqual(x.component(2 ) , 3 )
        _ = Vector()

    def test_str( self ) -> None:
        x = Vector([0, 0, 0, 0, 0, 1] )
        self.assertEqual(str(x ) , '(0,0,0,0,0,1)' )

    def test_size( self ) -> None:
        x = Vector([1, 2, 3, 4] )
        self.assertEqual(len(x ) , 4 )

    def test_euclidean_length( self ) -> None:
        x = Vector([1, 2] )
        y = Vector([1, 2, 3, 4, 5] )
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
        self.assertAlmostEqual(x.euclidean_length() , 2.2_36 , 3 )
        self.assertAlmostEqual(y.euclidean_length() , 7.4_16 , 3 )
        self.assertEqual(z.euclidean_length() , 0 )
        self.assertAlmostEqual(w.euclidean_length() , 7.6_16 , 3 )

    def test_add( self ) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x + y).component(0 ) , 2 )
        self.assertEqual((x + y).component(1 ) , 3 )
        self.assertEqual((x + y).component(2 ) , 4 )

    def test_sub( self ) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 1, 1] )
        self.assertEqual((x - y).component(0 ) , 0 )
        self.assertEqual((x - y).component(1 ) , 1 )
        self.assertEqual((x - y).component(2 ) , 2 )

    def test_mul( self ) -> None:
        x = Vector([1, 2, 3] )
        a = Vector([2, -1, 4] )  # for test of dot product
        b = Vector([1, -2, -1] )
        self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' )
        self.assertEqual((a * b) , 0 )

    def test_zero_vector( self ) -> None:
        self.assertEqual(str(zero_vector(1_0 ) ).count('0' ) , 1_0 )

    def test_unit_basis_vector( self ) -> None:
        self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' )

    def test_axpy( self ) -> None:
        x = Vector([1, 2, 3] )
        y = Vector([1, 0, 1] )
        self.assertEqual(str(axpy(2 , x , y ) ) , '(3,4,7)' )

    def test_copy( self ) -> None:
        x = Vector([1, 0, 0, 0, 0, 0] )
        y = x.copy()
        self.assertEqual(str(x ) , str(y ) )

    def test_change_component( self ) -> None:
        x = Vector([1, 0, 0] )
        x.change_component(0 , 0 )
        x.change_component(1 , 1 )
        self.assertEqual(str(x ) , '(0,1,0)' )

    def test_str_matrix( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(a ) )

    def test_minor( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        minors = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(minors[x][y] , a.minor(x , y ) )

    def test_cofactor( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        cofactors = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]]
        for x in range(a.height() ):
            for y in range(a.width() ):
                self.assertEqual(cofactors[x][y] , a.cofactor(x , y ) )

    def test_determinant( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(-5 , a.determinant() )

    def test_matrix_vector_mul( self ) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
        x = Vector([1, 2, 3] )
        self.assertEqual('(14,32,50)' , str(a * x ) )
        self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) )

    def test_change_component_matrix( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        a.change_component(0 , 2 , 5 )
        self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(a ) )

    def test_component_matrix( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )

    def test_add_matrix( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
        self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) )

    def test_sub_matrix( self ) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3 )
        self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) )

    def test_square_zero_matrix( self ) -> None:
        self.assertEqual(
            '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 406 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25l')
        sys.stdout.flush()


def show_cursor() -> None:
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write('\033[?25h')
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
    yield
    finally:
        show_cursor()
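

# Hedged usage sketch (illustrative addition; assumes an interactive terminal
# and a hypothetical `do_long_running_work` function):
#
#     with hide():                # cursor disappears for the duration of the block
#         do_long_running_work()
#     # cursor is restored here, even if the block raised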
| 343 | 0 |
"""simple docstring"""
import sys
import turtle
def get_mid(point1: tuple[float, float], point2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (point1[0] + point2[0]) / 2, (point1[1] + point2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw one triangle, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
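

# Hedged worked check of the midpoint helper (illustrative addition): the
# midpoint of (0, 0) and (2, 4) is ((0 + 2) / 2, (0 + 4) / 2) == (1.0, 2.0).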
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 117 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results) -> None:
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self) -> None:
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self) -> None:
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self) -> None:
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self) -> None:
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
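

# Hedged standalone sketch (illustrative, not part of the test suite): the same
# benchmark utilities can be run directly, e.g.
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], inference=True, training=False,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     print(PyTorchBenchmark(args).run())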
| 117 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 466 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
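

# Hedged illustration of the splitter above (not executed by the script):
# split_text("a b c d e f", n=2) -> ["a b", "c d", "e f"]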
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 466 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of ``fnc`` between ``x_start`` and ``x_end``."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
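

# Hedged sanity check (illustrative addition): for a straight line the
# piecewise-linear approximation is exact, so line_length(lambda x: x, 0, 1)
# returns math.hypot(1, 1) == sqrt(2) ≈ 1.4142 for any positive number of steps.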
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 22 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
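

# Hedged usage sketch (illustrative, bypassing the interactive prompt below):
# for the triangle 0-1 (w=1), 1-2 (w=2), 0-2 (w=3) the MST edges are (0, 1)
# and (1, 2).
#
#     adjacency = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#         adjacency[u].append([v, w])
#         adjacency[v].append([u, w])
#     prisms_algorithm(adjacency)  # -> [(0, 1), (1, 2)]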
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 22 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    """
    >>> base85_encode("")
    b''
    """
    # encode the input string to bytes and then Ascii85-encode it
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """
    >>> base85_decode(b"")
    ''
    """
    # Ascii85-decode the input into bytes and decode that into a readable string
    return base64.a85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
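    # Round-trip sanity check (illustrative addition, not in the original file):
    # decoding an encoded string returns the original input.
    assert base85_decode(base85_encode("hello")) == "hello"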
| 524 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
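

# Hedged usage sketch (illustrative; downloads the pretrained tokenizer files):
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     tokenizer("Hello world")["input_ids"]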
| 524 | 1 |
def multiplicative_persistence(num: int) -> int:
    """
    Number of times the digits must be multiplied together to reach one digit.

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Number of times the digits must be summed together to reach one digit.

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
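

# Hedged worked example (illustrative): 39 -> 3*9=27 -> 2*7=14 -> 1*4=4, so
# multiplicative_persistence(39) == 3; digit sums give 39 -> 12 -> 3, so
# additive_persistence(39) == 2.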
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 710 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCamelCase ( A__ : Optional[int] , A__ : Union[str, Any] , A__ : Tuple=1e-12 ) -> str:
lowerCamelCase_ : Optional[int] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
lowerCamelCase_ : List[str] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
return jnp.matmul(A__ , norm_emb_a.T )
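

# Hedged note (illustrative, not in the original file): despite the name, the
# helper above returns cosine *similarities* — two identical unit-normalized
# embeddings score 1.0, orthogonal ones 0.0; `eps` guards against zero norms.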
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(
        self,
        clip_input,
        params: dict = None,
    ):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 171 | 0 |
"""simple docstring"""
def _snake_case ( __snake_case : int ):
"""simple docstring"""
return str(__snake_case ) == str(__snake_case )[::-1]
def _snake_case ( __snake_case : int ):
"""simple docstring"""
return int(__snake_case ) + int(str(__snake_case )[::-1] )
def _snake_case ( __snake_case : int = 10000 ):
"""simple docstring"""
_lowerCamelCase : str = []
for num in range(1 , __snake_case ):
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Tuple = num
while iterations < 50:
_lowerCamelCase : str = sum_reverse(__snake_case )
iterations += 1
if is_palindrome(__snake_case ):
break
else:
lychrel_nums.append(__snake_case )
return len(__snake_case )
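

# Hedged worked note (illustrative): 47 + 74 = 121, a palindrome after one
# iteration, so 47 is not counted; 196 never reaches a palindrome within the
# 50-iteration cap and is therefore counted as a Lychrel candidate.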
if __name__ == "__main__":
    print(f"{solution() = }")
| 88 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""
    Pipeline for unconditional image generation using the score SDE (VE) scheduler.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
| 407 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor() -> None:
    """Hide the terminal cursor."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor() -> None:
    """Show the terminal cursor."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
| 182 | 0 |
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a fixed-capacity doubly linked list."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception('Empty Queue')

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception('Full Queue')


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
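

# Hedged usage sketch (illustrative, not part of the original module):
#
#     q = CircularQueueLinkedList(initial_capacity=3)
#     q.enqueue("a")
#     q.enqueue("b")
#     q.dequeue()  # -> "a" (FIFO order is preserved)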
| 345 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Folder paths and flip direction used by `main` below
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 416 | 0 |
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of range(n)."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation
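

# Hedged worked example (illustrative): the permutations of range(3) in
# lexicographic order are [0,1,2], [0,2,1], [1,0,2], ... so
# kth_permutation(2, 3) == [1, 0, 2].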
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 701 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 277 | 0 |
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the inputs: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function against its full truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 101 |
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(poly[i] * x**i) term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
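    # Hedged sanity check (illustrative addition): both evaluation orders agree
    # up to floating-point rounding: 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6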
| 255 | 0 |
"""Weighted procentual-proximity scoring of rows of numeric data."""


def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of data into per-column lists."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Normalize each column to [0, 1]; weight 0 inverts the score."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
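

# Hedged worked example (illustrative): scoring rows [[20, 60], [10, 70]] with
# weights [0, 1] (0 = lower is better, 1 = higher is better) normalizes each
# column to [0, 1] and sums, appending 0.0 to the first row and 2.0 to the second.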
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 709 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 14]),
('2H 5D 3C AS 5S', False, [14, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [14, 13, 12, 11, 10]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 23),
('JH 9H TH KH QH', 22),
('JC KH JS JD JH', 21),
('KH KC 3S 3H 3D', 20),
('8C 9C 5C 3C TC', 19),
('JS QS 9H TS KH', 18),
('7C 7S KH 2H 7H', 17),
('3C KH 5D 5S KH', 16),
('QH 8H KD JH 8S', 15),
('2D 6D 9D TH 7D', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 697 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    r"""
    Constructs a Speech2Text processor which wraps a Speech2Text feature extractor and a Speech2Text tokenizer
    into a single processor.
    """

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the active processor for encoding labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
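# A minimal usage sketch of the replacement the deprecation warning above recommends
# (the checkpoint name and the silent one-second waveform are illustrative assumptions):
if __name__ == "__main__":
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    batch = processor(audio=waveform, sampling_rate=16_000, text="hello world")
    print(sorted(batch.keys()))  # includes "input_features" and "labels"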
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DecisionTransformer model."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
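# A minimal instantiation sketch (values shown are the defaults above; the
# attribute_map makes `max_position_embeddings` an alias of `n_positions`):
if __name__ == "__main__":
    config = DecisionTransformerConfig(state_dim=17, act_dim=4)
    print(config.hidden_size, config.max_position_embeddings)  # 128 1024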
| 5 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    """
    Prints the multiplication table of a given number till the given number of terms.

    >>> print(multiplication_table(3, 5))
    3 * 1 = 3
    3 * 2 = 6
    3 * 3 = 9
    3 * 4 = 12
    3 * 5 = 15
    """
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 700 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__SCREAMING_SNAKE_CASE : Any =logging.get_logger(__name__)
# General docstring
__SCREAMING_SNAKE_CASE : Union[str, Any] ='''PoolFormerConfig'''
# Base docstring
__SCREAMING_SNAKE_CASE : List[Any] ='''sail/poolformer_s12'''
__SCREAMING_SNAKE_CASE : Union[str, Any] =[1, 512, 7, 7]
# Image classification docstring
__SCREAMING_SNAKE_CASE : Any ='''sail/poolformer_s12'''
__SCREAMING_SNAKE_CASE : Union[str, Any] ='''tabby, tabby cat'''
__SCREAMING_SNAKE_CASE : Tuple =[
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
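def _drop_path_demo() -> None:
    # Illustrative sanity check (an assumption of this edit, not part of the upstream
    # model file): with drop_prob=0.5 each sample in the batch is either zeroed out
    # entirely or rescaled by 1 / keep_prob = 2.0, keeping the expectation unchanged.
    x = torch.ones(4, 3)
    y = drop_path(x, drop_prob=0.5, training=True)
    assert set(y.unique().tolist()) <= {0.0, 2.0}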
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : str , snake_case__ : List[str]=None ):
super().__init__()
lowercase = patch_size if isinstance(snake_case__ , collections.abc.Iterable ) else (patch_size, patch_size)
lowercase = stride if isinstance(snake_case__ , collections.abc.Iterable ) else (stride, stride)
lowercase = padding if isinstance(snake_case__ , collections.abc.Iterable ) else (padding, padding)
lowercase = nn.Convad(snake_case__ , snake_case__ , kernel_size=snake_case__ , stride=snake_case__ , padding=snake_case__ )
lowercase = norm_layer(snake_case__ ) if norm_layer else nn.Identity()
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[Any] ):
lowercase = self.projection(snake_case__ )
lowercase = self.norm(snake_case__ )
return embeddings
class A_ ( nn.GroupNorm ):
def __init__( self : Union[str, Any] , snake_case__ : Dict , **snake_case__ : List[str] ):
super().__init__(1 , snake_case__ , **snake_case__ )
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Any ):
super().__init__()
lowercase = nn.AvgPoolad(snake_case__ , stride=1 , padding=pool_size // 2 , count_include_pad=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Union[str, Any] ):
return self.pool(snake_case__ ) - hidden_states
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Any , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Dict ):
super().__init__()
lowercase = nn.Convad(snake_case__ , snake_case__ , 1 )
lowercase = nn.Convad(snake_case__ , snake_case__ , 1 )
lowercase = PoolFormerDropPath(snake_case__ )
if isinstance(config.hidden_act , snake_case__ ):
lowercase = ACTaFN[config.hidden_act]
else:
lowercase = config.hidden_act
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case__ : Dict ):
lowercase = self.conva(snake_case__ )
lowercase = self.act_fn(snake_case__ )
lowercase = self.drop(snake_case__ )
lowercase = self.conva(snake_case__ )
lowercase = self.drop(snake_case__ )
return hidden_states
class A_ ( nn.Module ):
def __init__( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[str] ):
super().__init__()
lowercase = PoolFormerPooling(snake_case__ )
lowercase = PoolFormerOutput(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
lowercase = PoolFormerGroupNorm(snake_case__ )
lowercase = PoolFormerGroupNorm(snake_case__ )
# Useful for training neural nets
lowercase = PoolFormerDropPath(snake_case__ ) if drop_path > 0.0 else nn.Identity()
lowercase = config.use_layer_scale
if config.use_layer_scale:
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ )
lowercase = nn.Parameter(
config.layer_scale_init_value * torch.ones((snake_case__) ) , requires_grad=snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[str] ):
if self.use_layer_scale:
lowercase = self.pooling(self.before_norm(snake_case__ ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
lowercase = hidden_states + self.drop_path(snake_case__ )
lowercase = ()
lowercase = self.output(self.after_norm(snake_case__ ) )
lowercase = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
lowercase = hidden_states + self.drop_path(snake_case__ )
lowercase = (output,) + outputs
return outputs
else:
lowercase = self.drop_path(self.pooling(self.before_norm(snake_case__ ) ) )
# First residual connection
lowercase = pooling_output + hidden_states
lowercase = ()
# Second residual connection inside the PoolFormerOutput block
lowercase = self.drop_path(self.output(self.after_norm(snake_case__ ) ) )
lowercase = hidden_states + layer_output
lowercase = (output,) + outputs
return outputs
class A_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Optional[Any] ):
super().__init__()
lowercase = config
# stochastic depth decay rule
lowercase = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
lowercase = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
lowercase = nn.ModuleList(snake_case__ )
# Transformer blocks
lowercase = []
lowercase = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
lowercase = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
snake_case__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(snake_case__ ) )
lowercase = nn.ModuleList(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : str , snake_case__ : Optional[Any]=False , snake_case__ : Optional[int]=True ):
lowercase = () if output_hidden_states else None
lowercase = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
lowercase , lowercase = layers
# Get patch embeddings from hidden_states
lowercase = embedding_layer(snake_case__ )
# Send the embeddings through the blocks
for _, blk in enumerate(snake_case__ ):
lowercase = blk(snake_case__ )
lowercase = layer_outputs[0]
if output_hidden_states:
lowercase = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=snake_case__ , hidden_states=snake_case__ )
class A_ ( __a ):
_A :Any = PoolFormerConfig
_A :int = '''poolformer'''
_A :Union[str, Any] = '''pixel_values'''
_A :str = True
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Union[str, Any] ):
if isinstance(snake_case__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(snake_case__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case__ : Any , snake_case__ : Optional[int]=False ):
if isinstance(snake_case__ , snake_case__ ):
lowercase = value
__SCREAMING_SNAKE_CASE : Optional[Any] =R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__SCREAMING_SNAKE_CASE : str =R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __a , )
class A_ ( __a ):
def __init__( self : Union[str, Any] , snake_case__ : int ):
super().__init__(snake_case__ )
lowercase = config
lowercase = PoolFormerEncoder(snake_case__ )
# Initialize weights and apply final processing
self.post_init()
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
lowercase = self.encoder(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , )
lowercase = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=snake_case__ , hidden_states=encoder_outputs.hidden_states , )
class A_ ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Optional[int] ):
super().__init__()
lowercase = nn.Linear(config.hidden_size , config.hidden_size )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : str ):
lowercase = self.dense(snake_case__ )
return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
''' , __a , )
class A_ ( __a ):
def __init__( self : Dict , snake_case__ : Any ):
super().__init__(snake_case__ )
lowercase = config.num_labels
lowercase = PoolFormerModel(snake_case__ )
# Final norm
lowercase = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
lowercase = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(snake_case__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=snake_case__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : Optional[torch.LongTensor] = None , snake_case__ : Optional[bool] = None , snake_case__ : Optional[bool] = None , ):
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.poolformer(
snake_case__ , output_hidden_states=snake_case__ , return_dict=snake_case__ , )
lowercase = outputs[0]
lowercase = self.classifier(self.norm(snake_case__ ).mean([-2, -1] ) )
lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase = """single_label_classification"""
else:
lowercase = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase = MSELoss()
if self.num_labels == 1:
lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase = loss_fct(snake_case__ , snake_case__ )
elif self.config.problem_type == "single_label_classification":
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase = BCEWithLogitsLoss()
lowercase = loss_fct(snake_case__ , snake_case__ )
if not return_dict:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=snake_case__ , logits=snake_case__ , hidden_states=outputs.hidden_states )
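# End-to-end inference sketch for this file (a hedged example: it assumes the canonical
# upstream names PoolFormerImageProcessor / PoolFormerForImageClassification for the
# classes defined above, plus network access for the `sail/poolformer_s12` checkpoint):
#
#   from transformers import PoolFormerImageProcessor, PoolFormerForImageClassification
#   processor = PoolFormerImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted])  # e.g. "tabby, tabby cat"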
| 72 | 0 |
"""simple docstring"""
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCAmelCase__ ( unittest.TestCase ):
def lowercase ( self : Union[str, Any] ):
_snake_case = logging.get_logger()
# the current default level is logging.WARNING
_snake_case = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(__UpperCAmelCase )
def lowercase ( self : str ):
_snake_case = logging.get_verbosity()
_snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_snake_case = '''Testing 1, 2, 3'''
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(__UpperCAmelCase ) as cl:
logger.warning(__UpperCAmelCase )
self.assertEqual(cl.out , msg + '''\n''' )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(__UpperCAmelCase ) as cl:
logger.warning(__UpperCAmelCase )
self.assertEqual(cl.out , '''''' )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(__UpperCAmelCase ) as cl:
logger.warning(__UpperCAmelCase )
self.assertEqual(cl.out , msg + '''\n''' )
# restore to the original level
logging.set_verbosity(__UpperCAmelCase )
@mockenv(TRANSFORMERS_VERBOSITY='''error''' )
def lowercase ( self : Optional[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
_snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_snake_case = os.getenv('''TRANSFORMERS_VERBOSITY''' , __UpperCAmelCase )
_snake_case = logging.log_levels[env_level_str]
_snake_case = logging.get_verbosity()
self.assertEqual(
__UpperCAmelCase , __UpperCAmelCase , f'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
_snake_case = ''''''
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
def lowercase ( self : List[Any] ):
# reset for the env var to take effect, next time some logger call is made
transformers.utils.logging._reset_library_root_logger()
_snake_case = logging.logging.getLogger()
with CaptureLogger(__UpperCAmelCase ) as cl:
# this action activates the env var
logging.get_logger('''transformers.models.bart.tokenization_bart''' )
self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
# no need to restore as nothing was changed
def lowercase ( self : List[str] ):
# testing `logger.warning_advice()`
transformers.utils.logging._reset_library_root_logger()
_snake_case = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
_snake_case = '''Testing 1, 2, 3'''
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
# nothing should be logged as env var disables this method
with CaptureLogger(__UpperCAmelCase ) as cl:
logger.warning_advice(__UpperCAmelCase )
self.assertEqual(cl.out , '''''' )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(__UpperCAmelCase ) as cl:
logger.warning_advice(__UpperCAmelCase )
self.assertEqual(cl.out , msg + '''\n''' )
def _UpperCAmelCase ( ) -> List[Any]:
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 224 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds the root of `function` closest to `starting_point` via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
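# The loop above implements the modified Newton iteration for a root of multiplicity m:
#     x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# With multiplicity m = 1 it reduces to the classic Newton-Raphson step, and convergence
# is declared once two consecutive guesses differ by less than `precision`.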
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"{newton_raphson('exp(x) - 1', 10, precision=0.0_05)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 542 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCamelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["flax", "transformers"]
def __init__( self : Tuple, *_UpperCAmelCase : Any, **_UpperCAmelCase : Tuple ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ["flax", "transformers"] )
@classmethod
def A_ ( cls : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Any ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
@classmethod
def A_ ( cls : Any, *_UpperCAmelCase : List[str], **_UpperCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
class lowerCamelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["flax", "transformers"]
def __init__( self : Tuple, *_UpperCAmelCase : str, **_UpperCAmelCase : Dict ) -> int:
"""simple docstring"""
requires_backends(self, ["flax", "transformers"] )
@classmethod
def A_ ( cls : int, *_UpperCAmelCase : Union[str, Any], **_UpperCAmelCase : Dict ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
@classmethod
def A_ ( cls : str, *_UpperCAmelCase : Union[str, Any], **_UpperCAmelCase : List[str] ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
class lowerCamelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["flax", "transformers"]
def __init__( self : Optional[Any], *_UpperCAmelCase : Optional[Any], **_UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(self, ["flax", "transformers"] )
@classmethod
def A_ ( cls : Optional[Any], *_UpperCAmelCase : List[str], **_UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
@classmethod
def A_ ( cls : Any, *_UpperCAmelCase : List[str], **_UpperCAmelCase : Dict ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
class lowerCamelCase (metaclass=__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = ["flax", "transformers"]
def __init__( self : str, *_UpperCAmelCase : Optional[Any], **_UpperCAmelCase : Any ) -> Any:
"""simple docstring"""
requires_backends(self, ["flax", "transformers"] )
@classmethod
def A_ ( cls : Optional[int], *_UpperCAmelCase : Dict, **_UpperCAmelCase : Optional[int] ) -> str:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
@classmethod
def A_ ( cls : List[str], *_UpperCAmelCase : List[str], **_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ["flax", "transformers"] )
| 157 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}


class Data2VecAudioConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Data2VecAudio model."""

    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
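# `inputs_to_logits_ratio` is the overall stride of the convolutional feature encoder;
# with the default conv_stride (5, 2, 2, 2, 2, 2, 2) it equals 5 * 2**6 = 320, i.e. one
# encoder frame per 320 input samples (20 ms of audio at a 16 kHz sampling rate).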
| 157 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
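# The _LazyModule swap above makes `import transformers.models.llama` cheap: heavy
# submodules such as modeling_llama are only imported on first attribute access, while
# the TYPE_CHECKING branch keeps the real symbols visible to static type checkers.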
| 540 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False  # attribute name assumed from upstream diffusers tests
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
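# Note on the knob these tests exercise: `sag_scale` sets the strength of
# Self-Attention Guidance; `sag_scale=0.0` disables it and recovers the plain Stable
# Diffusion sampler, while larger values (1.0 here) trade diversity for sharper detail.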
| 540 | 1 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None` and calling the relevant
    accelerator cache-clearing function. Returned objects should be reassigned.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """
    A decorator that retries `function` with halved batch sizes whenever it raises an
    out-of-memory-style error. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
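# A minimal usage sketch (the training function and the simulated OOM are illustrative):
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=8)
    def train(batch_size):
        if batch_size > 2:
            raise RuntimeError("CUDA out of memory.")  # simulate an OOM failure
        return batch_size

    print(train())  # retries 8 -> 4 -> 2 and returns 2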
| 715 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 544 | 0 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(result) == sorted(expected)
| 584 |
"""simple docstring"""
class Graph:
    """Graph stored as adjacency lists; used by Boruvka's minimum spanning tree algorithm."""

    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """For Boruvka's algorithm the weights should be distinct; converts them in place."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set union/find used by Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns the minimum spanning tree."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
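# A minimal usage sketch (an illustrative triangle graph; weights are already distinct):
if __name__ == "__main__":
    g = Graph.build(edges=[[0, 1, 1], [0, 2, 2], [1, 2, 3]])
    mst = Graph.boruvka_mst(g)
    print(mst)  # the MST keeps edges 0-1 (weight 1) and 0-2 (weight 2)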
| 584 | 1 |
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm, which finds the longest palindromic substring in linear time.

    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns the n-bit Gray code sequence as integers.

    >>> gray_code(2)
    [0, 1, 3, 2]
    """
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """
    Outputs the n-bit Gray code sequence as a list of bit strings.

    >>> gray_code_sequence_string(2)
    ['00', '01', '11', '10']
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
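# Worked example: gray_code_sequence_string(2) -> ["00", "01", "11", "10"], which
# gray_code(2) converts to [0, 1, 3, 2]; consecutive values differ in exactly one bit,
# the defining Gray-code property.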
if __name__ == "__main__":
import doctest
doctest.testmod()
| 297 | 0 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Prints a maximum set of activities that can be done by a single person, one at a
    time. Assumes the activities are sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
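# Why the greedy choice is safe: with activities sorted by finish time, the first
# compatible activity finishes earliest, so any optimal schedule can be rewritten
# (by an exchange argument) to include it without losing a selection.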
if __name__ == "__main__":
import doctest
doctest.testmod()
snake_case_ : Tuple = [1, 3, 0, 5, 8, 5]
snake_case_ : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 212 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    """Configuration class to store the configuration of a Donut Swin model."""

    model_type = "donut-swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
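# With the defaults above, hidden_size = 96 * 2**3 = 768: Swin doubles the channel
# dimension at each of the four stages, and VisionEncoderDecoderModel reads this value.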
| 212 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a : str = logging.get_logger(__name__)
a : List[str] = {'''vocab_file''': '''spiece.model'''}
a : Optional[int] = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int=False , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : int="<unk>" , __UpperCamelCase : List[Any]="<sep>" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : List[str]="<cls>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : List[Any]=["<eop>", "<eod>"] , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Any , ) ->None:
'''simple docstring'''
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_UpperCAmelCase = jieba
_UpperCAmelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
return len(self.sp_model )
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
if self.remove_space:
_UpperCAmelCase = """ """.join(inputs.strip().split() )
else:
_UpperCAmelCase = inputs
_UpperCAmelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_UpperCAmelCase = unicodedata.normalize("""NFKD""" , __UpperCamelCase )
_UpperCAmelCase = """""".join([c for c in outputs if not unicodedata.combining(__UpperCamelCase )] )
if self.do_lower_case:
_UpperCAmelCase = outputs.lower()
return outputs
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.preprocess_text(__UpperCamelCase )
_UpperCAmelCase = self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
_UpperCAmelCase = []
for piece in pieces:
if len(__UpperCamelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_UpperCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCamelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_UpperCAmelCase = cur_pieces[1:]
else:
_UpperCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCamelCase )
else:
new_pieces.append(__UpperCamelCase )
return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
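# Usage sketch (added; the vocab path and text are placeholders -- the class above is
# the CPM tokenizer named in the jieba error message):
# tokenizer = CpmTokenizer("spiece.model")
# print(tokenizer.tokenize("你好,世界"))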
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits a string into sentences and joins them with newlines (Pegasus-style)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
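if __name__ == "__main__":
    # Small usage example (added); relies on the punkt models downloaded above.
    print(add_newline_to_end_of_each_sentence("First sentence. Second sentence."))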
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    """Surface area of a cube."""
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """Surface area of a cuboid."""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """Surface area of a sphere."""
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """Total surface area of a hemisphere (curved surface plus the flat disc)."""
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """Total surface area of a cone."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """Total surface area of a conical frustum."""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError("surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """Total surface area of a cylinder."""
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """Surface area of a torus: 4 * pi^2 * R * r."""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError("surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """Area of a rectangle."""
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    """Area of a square."""
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """Area of a triangle from base and height."""
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Area of a triangle from its three sides, using Heron's formula."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """Area of a parallelogram."""
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    """Area of a trapezium."""
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    """Area of a circle."""
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """Area of an ellipse."""
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """Area of a rhombus from its diagonals."""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """Area of a regular polygon with `sides` sides of the given length."""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print("\nSurface Areas of various geometric shapes: \n")
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
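    # Added sanity checks against known closed-form results:
    assert area_triangle_three_sides(3, 4, 5) == 6.0  # 3-4-5 right triangle (Heron)
    assert surface_area_sphere(1) == 4 * pi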
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Recursively quick sorts a[start:end + 1] in place; returns the comparison count."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partitions a[start:end + 1] around a random pivot; returns (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    a[pivot], a[end] = a[end], a[pivot]  # move the pivot value to the end
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]

    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
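# Sanity check (added): the array must now be sorted in place.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))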
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            # The fused qkv matrix is split into separate query/key/value tensors;
            # the target key paths below follow the ViT-style HF module layout.
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Converts an original AST checkpoint to the HF Audio Spectrogram Transformer format."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
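    # Example invocation (added; the script filename and dump path are placeholders):
    #   python convert_audio_spectrogram_transformer_checkpoint.py \
    #       --model_name ast-finetuned-audioset-10-10-0.4593 \
    #       --pytorch_dump_folder_path ./ast-converted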
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Maximum sum of any k consecutive elements, computed with a sliding window."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
    k = randint(0, 1_1_0)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
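    # Deterministic example (added): the best window of size 4 is [3, 1, 0, 20] -> 24.
    assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24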
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import functools
def min_edit_distance(word1: str, word2: str) -> int:
    """Levenshtein distance between two words, computed top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
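    # Classic worked example (added): "intention" -> "execution" takes 5 edits.
    assert min_edit_distance("intention", "execution") == 5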
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict(self):
        """Serializes this instance to a Python dictionary, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
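# Minimal usage sketch (added; assumes this module sits inside transformers so the
# relative imports above resolve):
# config = UperNetConfig()               # falls back to the default ResNet backbone
# round_trip = config.to_dict()          # the nested backbone config is serialized too
# assert round_trip["model_type"] == "upernet"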
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_ddim(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]

                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the arc length of fnc on [x_start, x_end] with `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
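    # Sanity check (added): for f(x) = x on [0, 1] every chord has slope 1,
    # so the approximation equals sqrt(2) for any step count.
    assert math.isclose(line_length(lambda x: x, 0, 1), math.sqrt(2))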
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Returns the prime factors of n in ascending order, by trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
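    # Worked example (added): 868 = 2 * 2 * 7 * 31.
    assert prime_factors(868) == [2, 2, 7, 31]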
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
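# Minimal usage sketch (added; assumes this module sits inside transformers so the
# relative imports above resolve):
# config = DinatConfig(out_features=["stage2", "stage4"])
# assert config.hidden_size == 64 * 2**3  # channel dim after the last of the 4 stages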
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Finds a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
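    # Known root check (added): x^3 - 2x - 5 = 0 has a real root near 2.0945515.
    assert abs(intersection(f, 3, 3.5) - 2.0945515) < 1e-3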
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: tries every 3-permutation of arr."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer approach over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(f"""The time for naive implementation is {times[0]}.""")
print(f"""The time for optimized implementation is {times[1]}.""")
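    # Deterministic example (added): 5 + 7 + 23 == 35.
    assert triplet_sum1([13, 29, 7, 23, 5], 35) == (5, 7, 23)
    assert triplet_sum2([13, 29, 7, 23, 5], 35) == (5, 7, 23)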
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
_a : Optional[int] = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
_a : Optional[Any] = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
_a : int = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
_a : str = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
_a : Union[str, Any] = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
_a : Any = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
_a : List[Any] = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
_a : Optional[int] = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
_a : Tuple = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
_a : List[Any] = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
_a : Optional[Any] = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
_a : Union[str, Any] = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
_a : Optional[int] = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
_a : Union[str, Any] = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
_a : str = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    """Configuration class for XGLM models."""

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
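    # Worked example (added): L = 1 mH, C = 1 µF  ->  f ≈ 5032.9 Hz.
    from math import isclose

    label, frequency = resonant_frequency(1e-3, 1e-6)
    assert label == "Resonant frequency" and isclose(frequency, 5032.9, rel_tol=1e-3)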
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]]
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]]
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
def __lowerCamelCase ( self ):
__UpperCAmelCase = GenerationConfig(
do_sample=__A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
__UpperCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-generation-config-org' , push_to_hub=__A , use_auth_token=self._token )
__UpperCAmelCase = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 617 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """
    Zero-shot object detection pipeline: predicts bounding boxes for the free-text `candidate_labels`
    passed along with an image.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        # Keep the original (height, width) so boxes can be rescaled back in postprocessing
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            # Convert the raw logits/boxes for this label into thresholded detections in image coordinates
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox | 86 |
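# Usage sketch for the pipeline above (hedged: the checkpoint name is the usual
# OWL-ViT zero-shot detection example, not something this file pins down):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )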
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal part of `number`; round it to `digit_amount` places when
    `digit_amount` is positive, otherwise return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 453 | 0 |
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4)) | 566 |
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

logging.basicConfig(level=logging.INFO, format="%(message)s")


def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the class means."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError
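# Demonstration sketch (added; not part of the original module): project a toy
# 3-feature dataset down to its two leading principal components.
def demo_principal_component_analysis() -> None:
    rng = np.random.default_rng(0)
    toy_features = rng.normal(size=(3, 50))  # 3 features x 50 samples
    projected = principal_component_analysis(toy_features, dimensions=2)
    logging.info("Projected data shape: %s", projected.shape)  # (2, 50)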
if __name__ == "__main__":
    import doctest

    doctest.testmod() | 566 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ) -> Any:
A : Any = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
A : Optional[int] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(__UpperCAmelCase ) , __UpperCAmelCase )
def snake_case ( self ) -> Optional[Any]:
A : Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , x.transpose() ) )
A : int = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def snake_case ( self ) -> Optional[Any]:
A : Union[str, Any] = np.random.randn(3 , 4 )
A : List[str] = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , transpose(__UpperCAmelCase ).numpy() ) )
A : Tuple = np.random.randn(3 , 4 , 5 )
A : Tuple = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , transpose(__UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def snake_case ( self ) -> Union[str, Any]:
A : int = np.random.randn(3 , 4 )
A : Tuple = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , transpose(__UpperCAmelCase ).numpy() ) )
A : str = np.random.randn(3 , 4 , 5 )
A : Optional[Any] = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , transpose(__UpperCAmelCase , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def snake_case ( self ) -> List[Any]:
A : Optional[int] = np.random.randn(3 , 4 )
A : str = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase ) , np.asarray(transpose(__UpperCAmelCase ) ) ) )
A : Union[str, Any] = np.random.randn(3 , 4 , 5 )
A : str = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) , np.asarray(transpose(__UpperCAmelCase , axes=(1, 2, 0) ) ) ) )
def snake_case ( self ) -> str:
A : Any = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , np.reshape(__UpperCAmelCase , (4, 3) ) ) )
A : Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , np.reshape(__UpperCAmelCase , (12, 5) ) ) )
@require_torch
def snake_case ( self ) -> List[Any]:
A : List[Any] = np.random.randn(3 , 4 )
A : Tuple = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , reshape(__UpperCAmelCase , (4, 3) ).numpy() ) )
A : Union[str, Any] = np.random.randn(3 , 4 , 5 )
A : Union[str, Any] = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , reshape(__UpperCAmelCase , (12, 5) ).numpy() ) )
@require_tf
def snake_case ( self ) -> List[str]:
A : List[str] = np.random.randn(3 , 4 )
A : Union[str, Any] = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , reshape(__UpperCAmelCase , (4, 3) ).numpy() ) )
A : List[str] = np.random.randn(3 , 4 , 5 )
A : List[str] = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , reshape(__UpperCAmelCase , (12, 5) ).numpy() ) )
@require_flax
def snake_case ( self ) -> Dict:
A : Optional[Any] = np.random.randn(3 , 4 )
A : List[Any] = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (4, 3) ) , np.asarray(reshape(__UpperCAmelCase , (4, 3) ) ) ) )
A : List[str] = np.random.randn(3 , 4 , 5 )
A : Dict = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(reshape(__UpperCAmelCase , (12, 5) ) , np.asarray(reshape(__UpperCAmelCase , (12, 5) ) ) ) )
def snake_case ( self ) -> Optional[int]:
A : Any = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , np.squeeze(__UpperCAmelCase ) ) )
A : Any = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , np.squeeze(__UpperCAmelCase , axis=2 ) ) )
@require_torch
def snake_case ( self ) -> List[str]:
A : List[str] = np.random.randn(1 , 3 , 4 )
A : List[str] = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , squeeze(__UpperCAmelCase ).numpy() ) )
A : Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
A : List[str] = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , squeeze(__UpperCAmelCase , axis=2 ).numpy() ) )
@require_tf
def snake_case ( self ) -> List[Any]:
A : Any = np.random.randn(1 , 3 , 4 )
A : Any = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , squeeze(__UpperCAmelCase ).numpy() ) )
A : int = np.random.randn(1 , 4 , 1 , 5 )
A : List[str] = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , squeeze(__UpperCAmelCase , axis=2 ).numpy() ) )
@require_flax
def snake_case ( self ) -> List[Any]:
A : Optional[int] = np.random.randn(1 , 3 , 4 )
A : List[str] = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase ) , np.asarray(squeeze(__UpperCAmelCase ) ) ) )
A : Any = np.random.randn(1 , 4 , 1 , 5 )
A : List[str] = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(squeeze(__UpperCAmelCase , axis=2 ) , np.asarray(squeeze(__UpperCAmelCase , axis=2 ) ) ) )
def snake_case ( self ) -> Dict:
A : int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , np.expand_dims(__UpperCAmelCase , axis=1 ) ) )
@require_torch
def snake_case ( self ) -> str:
A : str = np.random.randn(3 , 4 )
A : Tuple = torch.tensor(__UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , expand_dims(__UpperCAmelCase , axis=1 ).numpy() ) )
@require_tf
def snake_case ( self ) -> int:
A : int = np.random.randn(3 , 4 )
A : Tuple = tf.constant(__UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , expand_dims(__UpperCAmelCase , axis=1 ).numpy() ) )
@require_flax
def snake_case ( self ) -> List[str]:
A : Optional[int] = np.random.randn(3 , 4 )
A : Optional[int] = jnp.array(__UpperCAmelCase )
self.assertTrue(np.allclose(expand_dims(__UpperCAmelCase , axis=1 ) , np.asarray(expand_dims(__UpperCAmelCase , axis=1 ) ) ) )
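# Quick sketch of the framework-agnostic helpers exercised above (assumption:
# each helper dispatches on the input type, so identical calls work for NumPy,
# Torch, TF, and JAX inputs):
#
#   x = np.random.randn(3, 4)
#   transpose(x).shape            # (4, 3)
#   reshape(x, (12, 1)).shape     # (12, 1)
#   expand_dims(x, axis=0).shape  # (1, 3, 4)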
| 542 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
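# Example (sketch): fetch only the processor config dict, without instantiating
# an image processor; the checkpoint name is illustrative.
#
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")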
class __lowercase :
"""simple docstring"""
def __init__( self ) -> int:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__UpperCAmelCase )
def snake_case ( cls , __UpperCAmelCase , **__UpperCAmelCase ) -> Tuple:
A : Tuple = kwargs.pop('''config''' , __UpperCAmelCase )
A : Union[str, Any] = kwargs.pop('''trust_remote_code''' , __UpperCAmelCase )
A : str = True
A , A : str = ImageProcessingMixin.get_image_processor_dict(__UpperCAmelCase , **__UpperCAmelCase )
A : List[Any] = config_dict.get('''image_processor_type''' , __UpperCAmelCase )
A : List[Any] = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
A : List[str] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
A : List[str] = config_dict.pop('''feature_extractor_type''' , __UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
A : Tuple = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
A : Any = config_dict['''auto_map''']['''AutoFeatureExtractor''']
A : int = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
A : List[str] = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
# It could be in `config.image_processor_type``
A : Any = getattr(__UpperCAmelCase , '''image_processor_type''' , __UpperCAmelCase )
if hasattr(__UpperCAmelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
A : List[Any] = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
A : str = image_processor_class_from_name(__UpperCAmelCase )
A : int = image_processor_auto_map is not None
A : List[str] = image_processor_class is not None or type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
A : Optional[Any] = resolve_trust_remote_code(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if has_remote_code and trust_remote_code:
A : Tuple = get_class_from_dynamic_module(
__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
A : Optional[Any] = kwargs.pop('''code_revision''' , __UpperCAmelCase )
if os.path.isdir(__UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
A : Tuple = IMAGE_PROCESSOR_MAPPING[type(__UpperCAmelCase )]
return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase )
raise ValueError(
f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def snake_case ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
IMAGE_PROCESSOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
| 542 | 1 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
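# These re-exports keep the legacy import paths working, e.g. (illustrative):
#   from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments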
| 454 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 454 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().lower()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."""
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Union[str, Any] , lowercase :List[int] , lowercase :float = 4.0 , lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase :int = 5_0 , lowercase :Optional[str] = "pil" , lowercase :bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE = torch.tensor(lowercase , device=self.device ).reshape(-1 )
SCREAMING_SNAKE_CASE = torch.tensor([1_0_0_0] * batch_size , device=self.device )
SCREAMING_SNAKE_CASE = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE = latent_model_input[: len(lowercase ) // 2]
SCREAMING_SNAKE_CASE = torch.cat([half, half] , dim=0 )
SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(lowercase , lowercase )
timesteps = t
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = latent_model_input.device.type == "mps"
if isinstance(timesteps, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
SCREAMING_SNAKE_CASE = self.transformer(
lowercase , timestep=lowercase , class_labels=lowercase ).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(lowercase , len(lowercase ) // 2 , dim=0 )
SCREAMING_SNAKE_CASE = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE = torch.cat([half_eps, half_eps] , dim=0 )
SCREAMING_SNAKE_CASE = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = torch.split(lowercase , lowercase , dim=1 )
else:
SCREAMING_SNAKE_CASE = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(lowercase , lowercase , lowercase ).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = latent_model_input.chunk(2 , dim=0 )
else:
SCREAMING_SNAKE_CASE = latent_model_input
SCREAMING_SNAKE_CASE = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE = self.vae.decode(lowercase ).sample
SCREAMING_SNAKE_CASE = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase ) | 201 |
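# Usage sketch ("facebook/DiT-XL-2-256" is the reference checkpoint documented
# for this pipeline; treat the exact call as illustrative):
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images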
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def snake_case__ ( self :Dict ) -> List[Any]:
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase )
def snake_case__ ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase , beta_end=lowercase )
def snake_case__ ( self :Any ) -> List[str]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase )
def snake_case__ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase )
def snake_case__ ( self :Any ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def snake_case__ ( self :str ) -> str:
"""simple docstring"""
self.check_over_configs(thresholding=lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , )
def snake_case__ ( self :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase )
def snake_case__ ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase )
def snake_case__ ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def snake_case__ ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE = samplea.shape[0]
SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE = torch.arange(lowercase )[0:3, None].repeat(1 , lowercase )
SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 11_53.18_33 ) < 1e-2
assert abs(result_mean.item() - 0.50_05 ) < 1e-3
def snake_case__ ( self :str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def snake_case__ ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = len(lowercase )
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(lowercase ) ):
# 1. predict noise residual
SCREAMING_SNAKE_CASE = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
SCREAMING_SNAKE_CASE = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
SCREAMING_SNAKE_CASE = pred_prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(lowercase ) )
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def snake_case__ ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase )
SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(lowercase ):
if i == len(lowercase ) - 1:
SCREAMING_SNAKE_CASE = -1
else:
SCREAMING_SNAKE_CASE = timesteps[i + 1]
SCREAMING_SNAKE_CASE = scheduler.previous_timestep(lowercase )
SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(lowercase , lowercase )
def snake_case__ ( self :Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=lowercase )
def snake_case__ ( self :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [1_0_0, 8_7, 5_0, 1, 0]
SCREAMING_SNAKE_CASE = len(lowercase )
with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=lowercase , timesteps=lowercase )
def snake_case__ ( self :str ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**lowercase )
SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError,
msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
):
scheduler.set_timesteps(timesteps=lowercase ) | 201 | 1 |
"""simple docstring"""
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
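# Worked example: a negative discriminant yields a complex-conjugate pair, e.g.
#   quadratic_roots(a=1, b=0, c=1)  ->  (1j, -1j)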
| 217 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
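# Minimal usage sketch (values mirror the defaults above; illustrative only):
#
#   config = Speech2TextConfig()
#   config.num_conv_layers, config.conv_kernel_sizes  # 2, [5, 5]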
| 217 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'''pipelines_utils''',
'''0.22.0''',
'''Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.''',
standard_warn=False,
stacklevel=3,
)
| 403 |
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # A color is valid for this vertex if no already-colored neighbour uses it
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
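# Usage sketch (added; not part of the original file). A 3-cycle needs three
# colors, so `color` succeeds with max_colors=3 and returns [] with max_colors=2.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # []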
| 403 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_TEXT_ENCODER + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(LORA_PREFIX_UNET + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv layers keep their LoRA factors as 4D tensors; squeeze to 2D before the matmul
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
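# Note on the update above: it implements W = W0 + alpha * (up @ down), where
# `pair_keys[0]` is the lora_up factor (out_features x rank) and `pair_keys[1]`
# the lora_down factor (rank x in_features), so their product matches the full
# weight's shape.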
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.'''
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors'''
)
parser.add_argument(
'''--lora_prefix_text_encoder''',
default='''lora_te''',
type=str,
help='''The prefix of text encoder weight in safetensors''',
)
parser.add_argument('''--alpha''', default=0.7_5, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''')
parser.add_argument(
'''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.'''
)
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.base_model_path
UpperCAmelCase_ = args.checkpoint_path
UpperCAmelCase_ = args.dump_path
UpperCAmelCase_ = args.lora_prefix_unet
UpperCAmelCase_ = args.lora_prefix_text_encoder
UpperCAmelCase_ = args.alpha
UpperCAmelCase_ = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
UpperCAmelCase_ = pipe.to(args.device)
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 264 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_xlnet"] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_xlnet"] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 264 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_poolformer": [
"POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"PoolFormerConfig",
"PoolFormerOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
"POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"PoolFormerForImageClassification",
"PoolFormerModel",
"PoolFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 81 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 551 | 0 |
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d, also 4d < a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 710 |
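# Sketch of why the sieve above works (my annotation, not part of the original file):
# write the progression as (z, y, x) = (a - d, a, a + d); then
#   x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = a * (4 * d - a)
# so n = a * (4d - a). For each divisor a of n (`first_term`), a + n / a = 4d is
# exactly the `common_difference` computed above before the division by 4.
def brute_force_count(n: int, search_limit: int = 1_000) -> int:
    """Count solutions of x**2 - y**2 - z**2 == n by direct search (slow, for cross-checking)."""
    count = 0
    for a in range(1, search_limit):
        for d in range(1, a):  # a > d keeps z = a - d positive
            if a * (4 * d - a) == n:
                count += 1
    return count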
"""simple docstring"""
def lowerCAmelCase_( lowercase_ : str , lowercase_ : str ) -> bool:
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = len(lowercase_ )
_lowerCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
_lowerCamelCase = True
for i in range(lowercase_ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
_lowerCamelCase = True
if a[i].islower():
_lowerCamelCase = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 623 | 0 |
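# Quick usage check for the DP above (added for illustration): "daBcd" can be
# abbreviated to "ABC" (drop the outer d's, capitalize a and c), while "dBcd"
# cannot produce the leading "A".
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False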
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
| 605 |
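# Illustrative usage of the config reconstructed above (the class name is inferred
# from `model_type`): with the defaults, every
# num_layers // num_sparse_encoder_layers-th encoder layer is sparse.
config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
assert config.encoder_sparse_step == 4
assert config.dense_act_fn == "relu" and config.is_gated_act is False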
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
| 92 | 0 |
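# Minimal shape check for the helper above (illustrative; assumes jax is installed):
# each timestep maps to `embedding_dim` features, half sin and half cos.
import jax.numpy as jnp

emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=8)
assert emb.shape == (4, 8)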
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 702 |
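# Example invocation (illustrative; the script name and paths are placeholders):
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin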
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 259 | 0 |
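# Minimal sketch of the round-trip the tests above exercise (illustrative; requires
# the `optimum` package to be installed):
#   model = model.to_bettertransformer()       # swap in fused attention kernels
#   out = model.generate(**inputs)
#   model = model.reverse_bettertransformer()  # restore canonical weights before save_pretrained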
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 98 |
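# Worked example for the metrics above (my addition): gold "the cat sat" normalizes
# to "cat sat" (article removed), prediction "cat sat down" shares the tokens
# {cat, sat}, so precision = 2/3, recall = 1, and F1 = 0.8.
assert normalize_answer("The cat sat!") == "cat sat"
assert abs(f1_score("cat sat down", "the cat sat") - 0.8) < 1e-9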
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        # import pdb
        # pdb.set_trace()
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| 98 | 1 |
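# Geometry sketch for the latents-matching step above (illustrative): at 512x512 the
# latent grid is 64x64, identical to the reference, so the paste is a no-op; at
# 768x768 the 64x64 reference lands centered in the 96x96 grid.
latents_w, ref_w = 96, 64
dx = (latents_w - ref_w) // 2  # 16: left offset into the larger grid
w = ref_w if dx >= 0 else ref_w + 2 * dx
assert (dx, w) == (16, 64)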
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only: bool, scalable_attention: bool = False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[
                    f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"
                ] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables,
        num_layers=config.num_layers,
        is_encoder_only=is_encoder_only,
        scalable_attention=scalable_attention,
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 429 |
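# Shape sketch for the attention lookup above (illustrative): after slicing layer i,
# a T5X key kernel of shape (d_model, num_heads, head_dim) is flattened to the
# (d_model, inner_dim) matrix the PyTorch checkpoint expects.
import numpy as np

k_tmp = np.zeros((512, 8, 64))
k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
assert k.shape == (512, 512)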
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 429 | 1 |
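# Worked check for the formula above: for n = 10 the sum of the squares is 385 and
# the square of the sum is 55**2 = 3025, so the difference is 2640.
assert solution(10) == 2640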
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 154 |
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 154 | 1 |
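# Quick checks (added for illustration): 2**7 - 1 = 127 is prime, while
# 2**11 - 1 = 2047 = 23 * 89 is composite.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False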
from datetime import datetime

import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
with open(file_name, """wb""") as fp:
fp.write(image_data)
print(f'''Done. Image saved to disk as {file_name}.''')
| 537 |
def binary_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res += a

        a += a
        b >>= 1

    return res


def binary_mod_multiply(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c

        a += a
        b >>= 1

    return res
| 537 | 1 |
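# Worked trace (added for illustration) of binary_multiply(13, 6): b = 6 = 0b110
# selects the shifted addends 26 and 52, and 26 + 52 = 78 = 13 * 6.
assert binary_multiply(13, 6) == 78
assert binary_mod_multiply(13, 6, 5) == 3  # 78 % 5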
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "num_channels", 1: "num_channels", 2: "height", 3: "width"} if False else {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 100 |
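# Illustrative usage of the config reconstructed above: the channel dimension
# doubles at each of the four stages, so hidden_size = 96 * 2**3 = 768.
config = SwinConfig()
assert config.hidden_size == 768
assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]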
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
| 468 | 0 |
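# Worked example from the problem statement (my annotation): COLIN has alphabetical
# value 3 + 15 + 12 + 9 + 14 = 53 and sits at position 938 in the sorted list,
# contributing 938 * 53 = 49714 to the total.
assert sum(ord(ch) - 64 for ch in "COLIN") == 53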
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import datasets
import datasets.config

from .utils import require_beam


class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam-based dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
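
# A minimal standalone sketch (not part of the original tests) of what
# `_build_pcollection` does above: `beam.Create` turns the (key, example) pairs
# into a PCollection that the local DirectRunner then processes.
if __name__ == "__main__":
    import apache_beam as beam

    with beam.Pipeline(runner="DirectRunner") as pipeline:
        examples = pipeline | "Load Examples" >> beam.Create(get_test_dummy_examples())
        examples | "Print" >> beam.Map(print)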
| 114 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union

import tqdm
from filelock import FileLock

from transformers import (
    BartTokenizer,
    BartTokenizerFast,
    DataProcessor,
    PreTrainedTokenizer,
    RobertaTokenizer,
    RobertaTokenizerFast,
    XLMRobertaTokenizer,
    is_tf_available,
    is_torch_available,
)


logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None


if is_torch_available():
    import torch
    from torch.utils.data import Dataset

    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")

                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


if is_tf_available():
    import tensorflow as tf

    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list


class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
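
# Hypothetical usage sketch (not part of the original file); the data directory and
# checkpoint name are placeholders.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(data_dir="./hans", tokenizer=tokenizer, task="hans", max_seq_length=128)
    print(len(dataset), dataset.get_labels())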
| 114 | 1 |
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
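    # The two calls above return the computed distances without printing them;
    # a quick check of the expected values on this graph:
    # 1 -> 3 -> 4 costs 5 + 6 = 11, and 0 -> 2 -> 3 costs 9 + 7 = 16.
    print(graph.show_min(1, 4))  # 11
    print(graph.show_min(0, 3))  # 16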
| 315 |
import unittest

from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
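
# For reference, a rough sketch (names and tolerance are assumptions, not the actual
# diffusers test code) of the comparison that `super().test_output(expected_slice)`
# performs in UNetBlockTesterMixin: a fixed corner of the block output is flattened
# and checked elementwise against the nine hard-coded values.
def _check_output_slice(output, expected_slice, atol=5e-3):
    import torch

    output_slice = output[0, -1, -3:, -3:].flatten()
    return torch.allclose(output_slice, torch.tensor(expected_slice), atol=atol)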
| 315 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
    )
else:
    from .modeling_text_unet import UNetFlatConditionModel
    from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
    from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
    from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
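
# Hypothetical usage sketch (not part of this __init__ module), assuming the
# community "shi-labs/versatile-diffusion" weights:
#
#   from diffusers import VersatileDiffusionTextToImagePipeline
#
#   pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
#   image = pipe("an astronaut riding a horse").images[0]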
| 313 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
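
# Hypothetical usage sketch (not part of the original file); the image path is a
# placeholder.
if __name__ == "__main__":
    from PIL import Image

    tool = ImageQuestionAnsweringTool()
    image = Image.open("photo_of_a_cat.png")
    print(tool(image=image, question="What animal is in the picture?"))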
| 313 | 1 |