code (string, 82 – 54.1k chars) | code_codestyle (int64, 0 – 699) | style_context (string, 111 – 35.6k chars) | style_context_codestyle (int64, 0 – 699) | label (int64, 0 – 1)
---|---|---|---|---
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 41 |
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
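
# A hypothetical invocation of the converter above (script name and paths are
# illustrative only, not taken from this dump):
#
#   python convert_mbart_original_checkpoint_to_pytorch.py \
#       ./checkpoints/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --mbart_50 --finetuned
#
# The resulting folder can then be loaded with
# MBartForConditionalGeneration.from_pretrained("./mbart-converted").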
| 41 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number of the base/exponent pair in the data
    file whose value base**exponent is the largest."""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        # Compare x * log10(a) instead of a**x to avoid huge integer powers
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
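
# Worked check of the logarithm trick used above (the values from the
# Project Euler 99 problem statement):
#   2**11 = 2048 and 3**7 = 2187, and indeed
#   11 * log10(2) ~= 3.311  <  7 * log10(3) ~= 3.340
# so comparing x * log10(a) picks 3**7 without computing either power.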
| 41 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 41 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 41 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's algorithm: returns integers (x, y) with a*x + b*y = gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Solve n % n1 == r1 and n % n2 == r2 for coprime moduli n1, n2
    using the Bezout coefficients from the extended Euclidean algorithm.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same problem as chinese_remainder_theorem, solved with modular inverses instead.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
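
# Hand-checked example for both solvers: find n with n % 5 == 1 and n % 7 == 3.
# extended_euclid(5, 7) gives (3, -2) since 3*5 + (-2)*7 == 1, so
#   n = 3*3*5 + 1*(-2)*7 = 31, and indeed 31 % 5 == 1 and 31 % 7 == 3.
# The inverse-based variant reaches the same answer:
#   chinese_remainder_theorem2(5, 1, 7, 3) == 31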
| 41 | 1 |
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 41 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtins should be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 41 | 1 |
import os
from glob import glob

import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn

from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil


class ProcessorGradientFlow:
    """Wraps CLIP preprocessing with differentiable torchvision transforms so
    gradients can flow back through the image-processing step."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)"
            )
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            # hold the first and last frames longer
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the current latent and decode it to an image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, '''bert.pt'''))
                loaded = torch.jit.load(os.path.join(tmp, '''bert.pt'''), map_location=torch_device)
                loaded(inputs_dict['''input_ids'''].to(torch_device), inputs_dict['''attention_mask'''].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''')
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
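# --- Usage note (illustrative, added): these checks follow the standard
# transformers test layout; assuming the usual file location, a typical
# invocation would be:
#   python -m pytest tests/models/nezha/test_modeling_nezha.py -k "inference"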
| 41 | 1 |
'''simple docstring'''
def or_gate(input_a, input_b):
    """simple docstring"""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate():
    """simple docstring"""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
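# --- Illustrative extension (added, not in the original file): the same
# tuple-counting idiom implements an AND gate; `and_gate` is an added name,
# shown only as a sketch.
# def and_gate(input_a, input_b):
#     return int((input_a, input_b).count(0) == 0)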
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """simple docstring"""

    key: KEY
    val: VAL


class _DeletedItem(_Item):
    """simple docstring"""

    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """simple docstring"""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ''' ,'''.join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41 | 1 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def get_efficientnet_config(model_name):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]['''hidden_dim''']
    config.width_coefficient = CONFIG_MAP[model_name]['''width_coef''']
    config.depth_coefficient = CONFIG_MAP[model_name]['''depth_coef''']
    config.image_size = CONFIG_MAP[model_name]['''image_size''']
    config.dropout_rate = CONFIG_MAP[model_name]['''dropout_rate''']
    config.depthwise_padding = CONFIG_MAP[model_name]['''dw_padding''']

    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]['''image_size''']
    preprocessor = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    block_names = [v.split('''_''')[0].split('''block''')[1] for v in original_param_names if v.startswith('''block''')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = '''efficientnet.''' + item[1]
    # Assumption restored from the upstream conversion script: the classifier head
    # weights live under the Keras "predictions" layer.
    key_mapping['''predictions/kernel:0'''] = '''classifier.weight'''
    key_mapping['''predictions/bias:0'''] = '''classifier.bias'''
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True, weights='''imagenet''', input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation='''softmax''', )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='''pt''')
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['''image_size''']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print('''Model outputs match!''')
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCAmelCase__ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
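# --- Usage sketch (added for illustration; the script filename is an assumption) ---
# python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model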
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''')
        if isinstance(image, (str, Image.Image)):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')
        outputs = self.model(**model_inputs)
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
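# --- Usage sketch (added for illustration; the checkpoint name is an assumption,
# any OWL-ViT-style zero-shot detection model should work) ---
# from transformers import pipeline
# detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]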
| 41 | 1 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
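# --- Usage sketch (added for illustration; API names are assumptions) ---
# model = FlaxAutoencoderKL(**init_dict)            # config dict as built above
# params = model.init_weights(jax.random.PRNGKey(0))
# out = model.apply({"params": params}, sample)     # encode/decode round trip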
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail'''):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''')
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors='''pt''', padding='''max_length''', )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
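# --- Usage sketch (added for illustration; the import path is an assumption) ---
# from transformers.tools import TextClassificationTool
# classifier = TextClassificationTool()
# classifier("This is a super nice API!", labels=["positive", "negative"])
# -> "positive"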
| 41 | 1 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    """simple docstring"""

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]
        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)
        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)
        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))
        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)
        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
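# --- Quick illustration (added, not part of the suite): shards hand out whole
# batches round-robin across processes, consistent with the expectations above.
# sampler = BatchSampler(range(8), batch_size=2, drop_last=False)
# [list(BatchSamplerShard(sampler, 2, i)) for i in range(2)]
# -> [[[0, 1], [4, 5]], [[2, 3], [6, 7]]]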
| 41 |
'''simple docstring'''
from collections.abc import Callable
class Heap:
    """simple docstring"""

    def __init__(self, key: Callable | None = None):
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i
        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right
        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        return self.arr[0] if self.size else None

    def extract_top(self):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
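# --- Usage sketch (added for illustration) ---
# With the default key, this behaves as a max-heap over the item values:
# h = Heap()
# for item, value in [(1, 10), (2, 5), (3, 30)]:
#     h.insert_item(item, value)
# h.get_top()        # -> [3, 30]
# h.update_item(1, 40)
# h.extract_top()    # -> [1, 40]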
| 41 | 1 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", '''transformers.models''')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '''__name__''', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''')
        return {}
    with open(resolved_config_file, encoding='''utf-8''') as reader:
        return json.load(reader)
class AutoImageProcessor:
    """simple docstring"""

    def __init__(self):
        raise EnvironmentError(
            '''AutoImageProcessor is designed to be instantiated '''
            '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''')

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('''config''', None)
        trust_remote_code = kwargs.pop('''trust_remote_code''', None)
        kwargs['''_from_auto'''] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('''image_processor_type''', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''', {}):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''', None)
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''')
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''', '''ImageProcessor''')
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {}):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''', '''ImageProcessor''')
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''')
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, '''image_processor_type''', None)
            if hasattr(config, '''auto_map''') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('''code_revision''', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
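# --- Usage sketch (added for illustration; the checkpoint name is an assumption) ---
# from transformers import AutoImageProcessor
# processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
# inputs = processor(images=pil_image, return_tensors="pt")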
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('''on_init_end''')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('''on_train_begin''')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('''on_train_end''')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('''on_epoch_begin''')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('''on_epoch_end''')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('''on_step_begin''')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('''on_step_end''')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('''on_evaluate''')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('''on_predict''')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('''on_save''')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('''on_log''')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('''on_prediction_step''')
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def _A ( A__ = 2000000 ):
"""simple docstring"""
__lowercase = [0]
__lowercase = 42
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# the best product of triangle numbers found so far; we want this as close as possible to target
__lowercase = 0
# the area corresponding to the grid that gives the product closest to target
__lowercase = 0
# an estimate of b, using the quadratic formula
__lowercase = 42
# the largest integer less than b_estimate
__lowercase = 42
# the smallest integer greater than b_estimate
__lowercase = 42
# the triangle number corresponding to b_floor
__lowercase = 42
# the triangle number corresponding to b_ceil
__lowercase = 42
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__lowercase = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__lowercase = floor(A__ )
__lowercase = ceil(A__ )
__lowercase = triangle_numbers[b_floor]
__lowercase = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_first_guess * triangle_a
__lowercase = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__lowercase = triangle_b_second_guess * triangle_a
__lowercase = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'{solution() = }')
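# Derivation note (added for clarity, not part of this row): an a x b grid
# contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th
# triangle number. Fixing a and solving T(b) = b * (b + 1) / 2 = target / T(a)
# with the quadratic formula gives b = (-1 + sqrt(1 + 8 * target / T(a))) / 2,
# which is exactly the `b_estimate` expression in the loop above. For the
# default target of 2,000,000 the closest grid is 36 x 77, since
# T(36) * T(77) = 666 * 3003 = 1,999,998, giving an area of 36 * 77 = 2772.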
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
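# Hedged usage sketch (in upstream diffusers this class is FlaxControlNetModel;
# the anonymized names above follow this row's conventions):
#   import jax
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
# The init method above builds dummy sample, timestep, encoder hidden state and
# conditioning tensors with matching shapes purely to initialize parameters.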
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ ):
"""simple docstring"""
__lowercase = str(A__ )
return len(A__ ) == 9 and set(A__ ) == set('''123456789''' )
def _A ( ):
"""simple docstring"""
for base_num in range(9999 , 4999 , -1 ):
__lowercase = 100002 * base_num
if is_9_pandigital(A__ ):
return candidate
for base_num in range(333 , 99 , -1 ):
__lowercase = 1002003 * base_num
if is_9_pandigital(A__ ):
return candidate
return None
if __name__ == "__main__":
print(f'{solution() = }')
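# Why 100002 and 1002003 (added note, not part of this row): for a four-digit
# base x in [5000, 9999], 2 * x has five digits, so concat(x, 2x)
# = x * 10**5 + 2 * x = 100002 * x. For a three-digit base x in [100, 333],
# concat(x, 2x, 3x) = x * 10**6 + 2x * 10**3 + 3x = 1002003 * x. The loops
# therefore scan concatenated products for n = 2 first, then n = 3; the
# expected Project Euler 38 result is 932718654, from 9327 * (1, 2).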
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t. rows * columns <= max_patches after resizing
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
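# Worked shape example (hedged note, not part of this row): with the default
# 16 x 16 patches, each flattened patch vector of an RGB image has length
# 2 + patch_height * patch_width * channels = 2 + 16 * 16 * 3 = 770, where the
# first two entries are the 1-based row and column ids. `flattened_patches`
# then has shape (max_patches, 770), zero-padded past rows * columns, and
# `attention_mask` marks the non-padding rows.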
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# pad the smaller signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
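# Expected result (added note, not part of this row): for the hard-coded
# signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution
# y[n] = sum_k x1[k] * x2[(n - k) mod 4] evaluates to [10.0, 10.0, 6.0, 14.0].
# In the upstream source this pair is named
# CircularConvolution.circular_convolution; the anonymized names above follow
# this row's conventions.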
| 41 | 1 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : int ,lowercase__ : Union[str, "sqlalchemy.sql.Selectable"] ,lowercase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,lowercase__ : Optional[Features] = None ,lowercase__ : str = None ,lowercase__ : bool = False ,**lowercase__ : Union[str, Any] ,):
super().__init__(features=lowercase__ ,cache_dir=lowercase__ ,keep_in_memory=lowercase__ ,**lowercase__ )
__lowercase = Sql(
cache_dir=lowercase__ ,features=lowercase__ ,sql=lowercase__ ,con=lowercase__ ,**lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = None
__lowercase = None
__lowercase = None
__lowercase = None
self.builder.download_and_prepare(
download_config=lowercase__ ,download_mode=lowercase__ ,verification_mode=lowercase__ ,base_path=lowercase__ ,)
# Build dataset for splits
__lowercase = self.builder.as_dataset(
split='''train''' ,verification_mode=lowercase__ ,in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
"""simple docstring"""
def __init__( self : List[str] ,lowercase__ : Dataset ,lowercase__ : str ,lowercase__ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[int] = None ,**lowercase__ : str ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"num_proc {num_proc} must be an integer > 0." )
__lowercase = dataset
__lowercase = name
__lowercase = con
__lowercase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__lowercase = num_proc
__lowercase = to_sql_kwargs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.to_sql_kwargs.pop('''sql''' ,lowercase__ )
__lowercase = self.to_sql_kwargs.pop('''con''' ,lowercase__ )
__lowercase = self.to_sql_kwargs.pop('''index''' ,lowercase__ )
__lowercase = self._write(index=lowercase__ ,**self.to_sql_kwargs )
return written
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ):
__lowercase , __lowercase , __lowercase = args
__lowercase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs
__lowercase = query_table(
table=self.dataset.data ,key=slice(lowercase__ ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
__lowercase = batch.to_pandas()
__lowercase = df.to_sql(self.name ,self.con ,index=lowercase__ ,**lowercase__ )
return num_rows or len(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[int] ,**lowercase__ : Optional[int] ):
__lowercase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
__lowercase , __lowercase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,lowercase__ ,lowercase__ )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='''ba''' ,disable=not logging.is_progress_bar_enabled() ,desc='''Creating SQL from Arrow format''' ,):
written += num_rows
return written
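# Hedged usage sketch (in upstream datasets these are SqlDatasetReader and
# SqlDatasetWriter, surfaced as Dataset.from_sql / Dataset.to_sql; the
# anonymized names above follow this row's conventions):
#   import sqlite3
#   from datasets import Dataset
#   con = sqlite3.connect(":memory:")
#   ds = Dataset.from_dict({"a": [1, 2, 3]})
#   ds.to_sql("my_table", con)                # writes through the writer class
#   ds2 = Dataset.from_sql("my_table", con)   # reads through the reader class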
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowerCAmelCase__ = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def _A ( A__ ):
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__lowercase = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__lowercase = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowercase = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__lowercase = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = {}
import re
__lowercase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__ ):
__lowercase = re_encoder_block_conv_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] )
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
__lowercase = re_encoder_block_conv_in.sub(A__ , A__ )
elif re_encoder_block_resnet.fullmatch(A__ ):
__lowercase = re_encoder_block_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] )
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_encoder_block_resnet.sub(A__ , A__ )
elif re_encoder_block_proj_out.fullmatch(A__ ):
__lowercase = re_encoder_block_proj_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
__lowercase = re_encoder_block_proj_out.sub(A__ , A__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__ ):
__lowercase = re_decoder_block_conv_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
__lowercase = re_decoder_block_conv_out.sub(A__ , A__ )
elif re_decoder_block_resnet.fullmatch(A__ ):
__lowercase = re_decoder_block_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_decoder_block_resnet.sub(A__ , A__ )
elif re_decoder_block_proj_in.fullmatch(A__ ):
__lowercase = re_decoder_block_proj_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
__lowercase = re_decoder_block_proj_in.sub(A__ , A__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__ ):
__lowercase = re_prior_cond_conv_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
__lowercase = re_prior_cond_conv_out.sub(A__ , A__ )
elif re_prior_cond_resnet.fullmatch(A__ ):
__lowercase = re_prior_cond_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_prior_cond_resnet.sub(A__ , A__ )
elif re_prior_cond_proj_in.fullmatch(A__ ):
__lowercase = re_prior_cond_proj_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
__lowercase = re_prior_cond_proj_in.sub(A__ , A__ )
# keep original key
else:
__lowercase = original_key
__lowercase = replace_key(A__ )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
__lowercase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
__lowercase = original_key
__lowercase = original_key
__lowercase = value
return new_dict
@torch.no_grad()
def _A ( A__=None , A__=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
__lowercase = requests.get(F"{PREFIX}{file}" , allow_redirects=A__ )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=A__ )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content )
__lowercase = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__lowercase = JukeboxConfig.from_pretrained(A__ )
__lowercase = JukeboxModel(A__ )
__lowercase = []
__lowercase = {}
for i, dict_name in enumerate(A__ ):
__lowercase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['''model''']
__lowercase = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__lowercase = old_dic[k]
elif k.endswith('''.w''' ):
__lowercase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowercase = old_dic[k]
else:
__lowercase = old_dic[k]
__lowercase = '''vqvae''' if i == 0 else F"priors.{3 - i}"
__lowercase = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ )
weight_dict.append(A__ )
__lowercase = weight_dict.pop(0 )
model.vqvae.load_state_dict(A__ )
for i in range(len(A__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A__ ).mkdir(exist_ok=A__ )
with open(F"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile:
json.dump(A__ , A__ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(A__ )
return weight_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
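# Worked renaming example (added note, not part of this row): the key
# "encoders.0.level_blocks.1.model.2.3.weight" matches the first encoder
# pattern with groups ("0", "1", "2", "3", "weight"), so
# block_index = 2 * 2 + 3 = 7 and the key is rewritten to
# "encoders.0.level_blocks.1.downsample_block.7.weight".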
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
'''simple docstring'''
import heapq
def _A ( A__ ):
"""simple docstring"""
__lowercase = []
# for each node and its adjacency list, add them and the node's rank to the queue
# using the heapq module, the queue is filled like a priority queue
# heapq implements a min priority queue, so -1 * len(v) is used to get max-rank behavior
for key, value in graph.items():
# O(log(n))
heapq.heappush(A__ , [-1 * len(A__ ), (key, value)] )
# chosen_vertices = set of chosen vertices
__lowercase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
__lowercase = heapq.heappop(A__ )[1][0]
chosen_vertices.add(A__ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if the vertex has no adjacent nodes left, skip it
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacency list and update its rank
if argmax in elem[1][1]:
__lowercase = elem[1][1].index(A__ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(A__ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f'Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}')
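# Expected output (added note, not part of this row): for the graph above the
# greedy pass first picks vertex 2 (degree 3, winning the heap tie against
# vertex 3 by index), then vertices 0, 1 and 4 as covered edges are removed,
# so the script should print {0, 1, 2, 4}.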
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source:''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
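# Non-interactive sanity check (hedged, with hypothetical values that are not
# part of the original script):
#   example_graph = [
#       {"src": 0, "dst": 1, "weight": 4},
#       {"src": 0, "dst": 2, "weight": 1},
#       {"src": 2, "dst": 1, "weight": 2},
#   ]
#   bellman_ford(example_graph, 3, 3, 0)  # -> [0.0, 3, 1]
# The edge 2 -> 1 relaxes the direct 0 -> 1 path from 4 down to 1 + 2 = 3.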
| 41 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,*lowercase__ : Optional[Any] ,**lowercase__ : int ):
warnings.warn(
'''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use YolosImageProcessor instead.''' ,lowercase__ ,)
super().__init__(*lowercase__ ,**lowercase__ )
| 41 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCAmelCase__ = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
lowerCAmelCase__ = {
'''169M''': 768,
'''430M''': 1024,
'''1B5''': 2048,
'''3B''': 2560,
'''7B''': 4096,
'''14B''': 5120,
}
def _A ( A__ ):
"""simple docstring"""
__lowercase = list(state_dict.keys() )
for name in state_dict_keys:
__lowercase = state_dict.pop(A__ )
# emb -> embedding
if name.startswith('''emb.''' ):
__lowercase = name.replace('''emb.''' , '''embeddings.''' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('''blocks.0.ln0''' ):
__lowercase = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
# att -> attention
__lowercase = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , A__ )
# ffn -> feed_forward
__lowercase = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , A__ )
# time_mix_k -> time_mix_key and reshape
if name.endswith('''.time_mix_k''' ):
__lowercase = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('''.time_mix_v''' ):
__lowercase = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith('''.time_mix_r''' ):
__lowercase = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
if name != "head.weight":
__lowercase = '''rwkv.''' + name
__lowercase = weight
return state_dict
def _A ( A__ , A__ , A__ , A__=None , A__=None , A__=False , A__=None ):
"""simple docstring"""
if tokenizer_file is None:
print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
__lowercase = 50277
__lowercase = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
else:
__lowercase = PreTrainedTokenizerFast(tokenizer_file=A__ )
__lowercase = len(A__ )
tokenizer.save_pretrained(A__ )
# 2. Build the config
__lowercase = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowercase = candidate
break
if size is None:
raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
__lowercase = RwkvConfig(
vocab_size=A__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(A__ )
# 3. Download model file then convert state_dict
__lowercase = hf_hub_download(A__ , A__ )
__lowercase = torch.load(A__ , map_location='''cpu''' )
__lowercase = convert_state_dict(A__ )
# 4. Split in shards and save
__lowercase , __lowercase = shard_checkpoint(A__ )
for shard_file, shard in shards.items():
torch.save(A__ , os.path.join(A__ , A__ ) )
if index is not None:
__lowercase = os.path.join(A__ , A__ )
# Save the index as well
with open(A__ , '''w''' , encoding='''utf-8''' ) as f:
__lowercase = json.dumps(A__ , indent=2 , sort_keys=A__ ) + '''\n'''
f.write(A__ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
'''Cleaning up shards. This may fail with an out-of-memory error; if this is the case, don\'t worry, you still have converted the model.''' )
__lowercase = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowercase = torch.load(os.path.join(A__ , A__ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(A__ , A__ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
__lowercase = AutoModelForCausalLM.from_pretrained(A__ )
model.push_to_hub(A__ , max_shard_size='''2GB''' )
tokenizer.push_to_hub(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push the converted model to the Hub.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = FileLock(str(tmpdir / '''foo.lock''' ) )
__lowercase = 0.0_1
with locka.acquire():
with pytest.raises(A__ ):
__lowercase = time.time()
locka.acquire(A__ )
assert time.time() - _start > timeout
def _A ( A__ ):
"""simple docstring"""
__lowercase = '''a''' * 1000 + '''.lock'''
__lowercase = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith('''.lock''' )
assert not locka._lock_file.endswith(A__ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
__lowercase = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(A__ ):
locka.acquire(0 )
| 41 | 1 |
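The RWKV converter in the row above remaps checkpoint keys with a chain of string replacements and re.sub calls. A self-contained sketch of that key-renaming pattern on a toy state dict; the dict below is a stand-in for illustration, not real weights.

import re


def rename_key(name: str) -> str:
    # emb -> embeddings
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    # att -> attention, keeping the block index captured by the regex
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    # ffn -> feed_forward
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    # everything except the LM head lives under the "rwkv." prefix
    if name != "head.weight":
        name = "rwkv." + name
    return name


state_dict = {"emb.weight": 1, "blocks.3.att.key.weight": 2, "head.weight": 3}
renamed = {rename_key(key): value for key, value in state_dict.items()}
assert renamed == {
    "rwkv.embeddings.weight": 1,
    "rwkv.blocks.3.attention.key.weight": 2,
    "head.weight": 3,
}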
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
lowerCAmelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def _A ( ):
"""simple docstring"""
__lowercase = Github(os.environ['''GITHUB_TOKEN'''] )
__lowercase = g.get_repo('''huggingface/diffusers''' )
__lowercase = repo.get_issues(state='''open''' )
for issue in open_issues:
__lowercase = sorted(issue.get_comments() , key=lambda A__ : i.created_at , reverse=A__ )
__lowercase = comments[0] if len(A__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 1 |
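The import scaffolding in the row above follows the _LazyModule pattern: a dict from submodule to exported names, resolved only on first attribute access. A minimal sketch of the underlying mechanism; the class below is my own toy, not the Transformers implementation, and stdlib modules stand in for a package's submodules.

from __future__ import annotations

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve exported names to their providing submodule on first access."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]) -> None:
        super().__init__(name)
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        real = importlib.import_module(self._symbol_to_module[attr])
        value = getattr(real, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9.0) == 3.0
assert lazy.dumps({"a": 1}) == '{"a": 1}'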
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
print(F"Vertex\tShortest Distance from vertex {src}" )
for i, d in enumerate(A__ ):
print(F"{i}\t\t{d}" )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = [float('''inf''' )] * vertex_count
__lowercase = 0.0
for _ in range(vertex_count - 1 ):
for j in range(A__ ):
__lowercase , __lowercase , __lowercase = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__lowercase = distance[u] + w
__lowercase = check_negative_cycle(A__ , A__ , A__ )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter number of vertices: ''').strip())
lowerCAmelCase__ = int(input('''Enter number of edges: ''').strip())
lowerCAmelCase__ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowerCAmelCase__ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowerCAmelCase__ = int(input('''\nEnter shortest path source: ''').strip())
lowerCAmelCase__ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, source)
| 41 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase__ = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
lowerCAmelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCAmelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCAmelCase__ = re.compile(R'''\[([^\]]+)\]''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = _re_indent.search(A__ )
return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def _A ( A__ ):
"""simple docstring"""
def _inner(A__ ):
return key(A__ ).lower().replace('''_''' , '''''' )
return _inner
def _A ( A__ , A__=None ):
"""simple docstring"""
def noop(A__ ):
return x
if key is None:
__lowercase = noop
# Constants are all uppercase, they go first.
__lowercase = [obj for obj in objects if key(A__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__lowercase = [obj for obj in objects if key(A__ )[0].isupper() and not key(A__ ).isupper()]
# Functions begin with a lowercase, they go last.
__lowercase = [obj for obj in objects if not key(A__ )[0].isupper()]
__lowercase = ignore_underscore(A__ )
return sorted(A__ , key=A__ ) + sorted(A__ , key=A__ ) + sorted(A__ , key=A__ )
def _A ( A__ ):
"""simple docstring"""
def _replace(A__ ):
__lowercase = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(A__ )] ) + "]"
__lowercase = import_statement.split('''\n''' )
if len(A__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__lowercase = 2 if lines[1].strip() == '''[''' else 1
__lowercase = [(i, _re_strip_line.search(A__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
__lowercase = sort_objects(A__ , key=lambda A__ : x[1] )
__lowercase = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(A__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__lowercase = _re_bracket_content.sub(_replace , lines[1] )
else:
__lowercase = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__lowercase = keys[:-1]
__lowercase = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(A__ )] )
return "\n".join(A__ )
else:
# Finally we have to deal with imports fitting on one line
__lowercase = _re_bracket_content.sub(_replace , A__ )
return import_statement
def _A ( A__ , A__=True ):
"""simple docstring"""
with open(A__ , '''r''' ) as f:
__lowercase = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__lowercase = split_code_in_indented_blocks(
A__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(A__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__lowercase = main_blocks[block_idx]
__lowercase = block.split('''\n''' )
# Get to the start of the imports.
__lowercase = 0
while line_idx < len(A__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__lowercase = len(A__ )
else:
line_idx += 1
if line_idx >= len(A__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__lowercase = '''\n'''.join(block_lines[line_idx:-1] )
__lowercase = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
__lowercase = split_code_in_indented_blocks(A__ , indent_level=A__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__lowercase = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
__lowercase = [(pattern.search(A__ ).groups()[0] if pattern.search(A__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__lowercase = [(i, key) for i, key in enumerate(A__ ) if key is not None]
__lowercase = [x[0] for x in sorted(A__ , key=lambda A__ : x[1] )]
# We reorder the blocks, leaving empty lines/comments as they were and reordering the rest.
__lowercase = 0
__lowercase = []
for i in range(len(A__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
__lowercase = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(A__ )
count += 1
# And we put our main block back together with its first and last line.
__lowercase = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(A__ ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(A__ , '''w''' ) as f:
f.write('''\n'''.join(A__ ) )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
__lowercase = sort_imports(os.path.join(A__ , '''__init__.py''' ) , check_only=A__ )
if result:
__lowercase = [os.path.join(A__ , '''__init__.py''' )]
if len(A__ ) > 0:
raise ValueError(F"Would overwrite {len(A__ )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 1 |
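The style checker in the row above sorts import names into three buckets: UPPER_CASE constants first, CapWords classes second, lowercase functions last, each bucket alphabetized while ignoring underscores. A compact sketch of that ordering rule:

from __future__ import annotations


def sort_objects(objects: list[str]) -> list[str]:
    def key(name: str) -> str:
        # alphabetize case-insensitively and ignore underscores
        return name.lower().replace("_", "")

    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)


names = ["load_weights", "MODEL_LIST", "AutoModel", "B_CONST", "auto_config", "Trainer"]
assert sort_objects(names) == [
    "B_CONST", "MODEL_LIST", "AutoModel", "Trainer", "auto_config", "load_weights",
]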
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class lowercase_ (Generic[T] ):
"""simple docstring"""
def __init__( self : int ,lowercase__ : bool = True ):
__lowercase = {} # dictionary of lists
__lowercase = directed
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : T ,lowercase__ : T ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase__ )
self.adj_list[destination_vertex].append(lowercase__ )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase__ )
__lowercase = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowercase__ )
__lowercase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex. Also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
__lowercase = [destination_vertex]
__lowercase = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase__ )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase__ )
__lowercase = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__lowercase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__lowercase = [destination_vertex]
__lowercase = []
return self
def __repr__( self : Optional[int] ):
return pformat(self.adj_list )
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
__lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=lowercase__ ,set_alpha_to_one=lowercase__ ,)
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
__lowercase = CLIPTextModel(lowercase__ )
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__lowercase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : List[str]=0 ):
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
__lowercase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__lowercase = self.get_dummy_components()
__lowercase = TextToVideoSDPipeline(**lowercase__ )
__lowercase = sd_pipe.to(lowercase__ )
sd_pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ )
__lowercase = '''np'''
__lowercase = sd_pipe(**lowercase__ ).frames
__lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (6_4, 6_4, 3)
__lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
def SCREAMING_SNAKE_CASE ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
__lowercase = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
__lowercase = pipe.to('''cuda''' )
__lowercase = '''Spiderman is surfing'''
__lowercase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__lowercase = pipe(lowercase__ ,generator=lowercase__ ,num_inference_steps=2 ,output_type='''pt''' ).frames
__lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 1 |
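The code field at the top of this row implements a directed/undirected adjacency-list graph whose class name was normalized away by the dataset. A readable miniature of the same structure with a usage example; names are mine, and setdefault collapses the four vertex-presence cases the original spells out by hand.

from __future__ import annotations

from pprint import pformat


class GraphAdjacencyList:
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[str, list[str]] = {}
        self.directed = directed

    def add_edge(self, source: str, destination: str) -> GraphAdjacencyList:
        self.adj_list.setdefault(source, []).append(destination)
        self.adj_list.setdefault(destination, [])
        if not self.directed:
            # undirected graphs record the edge in both directions
            self.adj_list[destination].append(source)
        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)


graph = GraphAdjacencyList(directed=False)
graph.add_edge("a", "b").add_edge("b", "c")
assert graph.adj_list == {"a": ["b"], "b": ["a", "c"], "c": ["b"]}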
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _A ( A__ ): # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _A ( ):
"""simple docstring"""
with parallel_backend('''spark''' ):
assert ParallelBackendConfig.backend_name == "spark"
__lowercase = [1, 2, 3]
with pytest.raises(A__ ):
with parallel_backend('''unsupported backend''' ):
map_nested(A__ , A__ , num_proc=2 )
with pytest.raises(A__ ):
with parallel_backend('''unsupported backend''' ):
map_nested(A__ , A__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('''num_proc''' , [2, -1] )
def _A ( A__ ):
"""simple docstring"""
__lowercase = [1, 2]
__lowercase = {'''a''': 1, '''b''': 2}
__lowercase = {'''a''': [1, 2], '''b''': [3, 4]}
__lowercase = {'''a''': {'''1''': 1}, '''b''': 2}
__lowercase = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
__lowercase = [2, 3]
__lowercase = {'''a''': 2, '''b''': 3}
__lowercase = {'''a''': [2, 3], '''b''': [4, 5]}
__lowercase = {'''a''': {'''1''': 2}, '''b''': 3}
__lowercase = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
with parallel_backend('''spark''' ):
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
assert map_nested(A__ , A__ , num_proc=A__ ) == expected_map_nested_sa
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 1 |
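The mBART converter above rebuilds the LM head from the shared embedding via make_linear_from_emb. A minimal sketch of that embedding-to-linear weight tying, assuming PyTorch is installed; the sizes are arbitrary.

import torch
from torch import nn


def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the same underlying tensor
    return lin_layer


emb = nn.Embedding(num_embeddings=10, embedding_dim=4)
head = make_linear_from_emb(emb)
# The head projects hidden states back onto the vocabulary, and because the
# storage is shared, updating the embedding also updates the head.
assert head.weight.data_ptr() == emb.weight.data_ptr()
assert head(torch.zeros(4)).shape == (10,)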
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import os
from math import logaa
def _A ( A__ = "base_exp.txt" ):
"""simple docstring"""
__lowercase = 0
__lowercase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(A__ ) , A__ ) ) ):
__lowercase , __lowercase = list(map(A__ , line.split(''',''' ) ) )
if x * logaa(A__ ) > largest:
__lowercase = x * logaa(A__ )
__lowercase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 41 | 1 |
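The base_exp solution above rests on the identity that a**b > c**d exactly when b*log10(a) > d*log10(c), so the huge powers never have to be materialized. A tiny worked example:

from math import log10

# Which is larger, 2**1000 or 3**600? Compare exponent * log10(base) instead.
pairs = [(2, 1000), (3, 600)]
scores = [exponent * log10(base) for base, exponent in pairs]
assert scores[0] > scores[1]  # ~301.0 vs ~286.3, so 2**1000 wins
# 1-indexed position of the winning line, as the solution above returns:
assert max(range(len(scores)), key=scores.__getitem__) + 1 == 1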
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = '''Hello world! cécé herlolip'''
lowerCAmelCase__ = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = BertAbsConfig(
temp_dir='''.''' , finetune_bert=A__ , large=A__ , share_emb=A__ , use_bert_emb=A__ , encoder='''bert''' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__lowercase = torch.load(A__ , lambda A__ , A__ : storage )
__lowercase = AbsSummarizer(A__ , torch.device('''cpu''' ) , A__ )
original.eval()
__lowercase = BertAbsSummarizer(A__ , torch.device('''cpu''' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('''convert the model''' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outputs are identical
# ----------------------------------
logging.info('''Make sure that the models\' outputs are identical''' )
__lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
# prepare the model inputs
__lowercase = tokenizer.encode('''This is sample éàalj\'-.''' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
__lowercase = tokenizer.encode('''This is sample 3 éàalj\'-.''' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(A__ )) )
__lowercase = torch.tensor(A__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__lowercase = encoder_input_ids
__lowercase = decoder_input_ids
__lowercase = __lowercase = None
__lowercase = None
__lowercase = __lowercase = None
__lowercase = __lowercase = None
__lowercase = None
# The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__lowercase = original(A__ , A__ , A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = original.generator(A__ )
__lowercase = new_model(
A__ , A__ , A__ , A__ , A__ )[0]
__lowercase = new_model.generator(A__ )
__lowercase = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print('''Maximum absolute difference between model outputs: {:.2f}'''.format(A__ ) )
__lowercase = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print('''Maximum absolute difference between generator outputs: {:.2f}'''.format(A__ ) )
__lowercase = torch.allclose(A__ , A__ , atol=1e-3 )
if are_identical:
logging.info('''all outputs are equal up to 1e-3''' )
else:
raise ValueError('''the weights are different. The new model is likely different from the original one.''' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('''saving the model\'s state dictionary''' )
torch.save(
new_model.state_dict() , '''./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] ,lowercase__ : List[str]=5_0_2_6_5 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : Optional[int]=8 ,lowercase__ : List[Any]=2_0_4_8 ,lowercase__ : List[str]=1_6 ,lowercase__ : str=8 ,lowercase__ : Any=2_0_4_8 ,lowercase__ : Tuple=1_6 ,lowercase__ : Tuple=0.0 ,lowercase__ : List[str]=0.0 ,lowercase__ : Any=True ,lowercase__ : str=True ,lowercase__ : int="gelu" ,lowercase__ : Tuple=5_1_2 ,lowercase__ : List[Any]=0.1 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0 ,lowercase__ : Any=0.0_2 ,lowercase__ : Union[str, Any]=1 ,lowercase__ : List[Any]=False ,lowercase__ : Optional[int]=0 ,lowercase__ : Optional[int]=1 ,lowercase__ : str=2 ,lowercase__ : int=2 ,**lowercase__ : List[str] ,):
__lowercase = vocab_size
__lowercase = max_position_embeddings
__lowercase = d_model
__lowercase = encoder_ffn_dim
__lowercase = encoder_layers
__lowercase = encoder_attention_heads
__lowercase = decoder_ffn_dim
__lowercase = decoder_layers
__lowercase = decoder_attention_heads
__lowercase = dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = activation_function
__lowercase = init_std
__lowercase = encoder_layerdrop
__lowercase = decoder_layerdrop
__lowercase = use_cache
__lowercase = encoder_layers
__lowercase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase__ ,bos_token_id=lowercase__ ,eos_token_id=lowercase__ ,is_encoder_decoder=lowercase__ ,decoder_start_token_id=lowercase__ ,forced_eos_token_id=lowercase__ ,**lowercase__ ,)
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase = {0: '''batch'''}
__lowercase = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
__lowercase = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase__ ,direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowercase = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super().outputs
else:
__lowercase = super(lowercase__ ,self ).outputs
if self.use_past:
__lowercase , __lowercase = self.num_layers
for i in range(lowercase__ ):
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
__lowercase = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
# Generate decoder inputs
__lowercase = seq_length if not self.use_past else 1
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowercase = dict(**lowercase__ ,**lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
__lowercase = common_inputs['''decoder_input_ids'''].shape[1]
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = decoder_seq_length + 3
__lowercase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowercase = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase__ ,lowercase__ )] ,dim=1 )
__lowercase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowercase , __lowercase = self.num_layers
__lowercase = min(lowercase__ ,lowercase__ )
__lowercase = max(lowercase__ ,lowercase__ ) - min_num_layers
__lowercase = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
torch.zeros(lowercase__ ),
) )
# TODO: test this.
__lowercase = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase__ ,lowercase__ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
| 41 | 1 |
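The ONNX config above declares dynamic tensor axes as {axis index: symbolic name} mappings. A toy sketch of that convention for an encoder-decoder export; the dict below is illustrative, not a complete export spec.

from collections import OrderedDict

# axis 0 is the batch dimension, axis 1 the variable sequence length
use_past = True
common_inputs = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "encoder_sequence"}),
        ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
        ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
    ]
)
if use_past:
    # with cached key/values the decoder attends over past + current tokens
    common_inputs["decoder_attention_mask"] = {
        0: "batch",
        1: "past_decoder_sequence + sequence",
    }
assert list(common_inputs)[-1] == "decoder_attention_mask"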
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_swinv2''': ['''SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Swinv2Config'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b )
__lowercase = a // b
return (y, x - k * y)
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
def _A ( A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
if b < 0:
__lowercase = (b % n + n) % n
return b
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 1 |
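The second field of the row above implements the extended Euclidean algorithm and uses it for the Chinese remainder theorem. A de-obfuscated sketch with a worked check; the names are mine.

def extended_euclid(a: int, b: int) -> tuple[int, int]:
    # Returns (x, y) with a*x + b*y == gcd(a, b).
    if b == 0:
        return (1, 0)
    x, y = extended_euclid(b, a % b)
    return (y, x - (a // b) * y)


def chinese_remainder(n1: int, r1: int, n2: int, r2: int) -> int:
    # Smallest non-negative n with n % n1 == r1 and n % n2 == r2,
    # for coprime moduli n1 and n2.
    x, y = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


# gcd(240, 46) == 2 == 240 * (-9) + 46 * 47
assert extended_euclid(240, 46) == (-9, 47)
assert chinese_remainder(3, 2, 5, 3) == 8  # 8 % 3 == 2 and 8 % 5 == 3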
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
# check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
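The tests above exercise `patch_submodule` both as a context manager and through explicit `start()`/`stop()` calls. As a minimal standalone sketch of the semantics (hedged: `patch_submodule` is assumed to live in `datasets.utils.patching`, and `_test_patching` stands in for any helper module that does `import os`):

from datasets.utils.patching import patch_submodule
import _test_patching  # assumption: a helper module whose globals include `os`

mock = "__sketch_mock__"
with patch_submodule(_test_patching, "os.path.join", mock):
    assert _test_patching.os.path.join is mock   # patched inside the block
assert _test_patching.os.path.join is not mock   # original restored on exit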
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ):
__lowercase = {}
if "threshold" in kwargs:
__lowercase = kwargs['''threshold''']
if "top_k" in kwargs:
__lowercase = kwargs['''top_k''']
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = load_image(inputs['''image'''] )
__lowercase = inputs['''candidate_labels''']
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = candidate_labels.split(''',''' )
        __lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.int32 )
for i, candidate_label in enumerate(lowercase__ ):
__lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework )
__lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = model_inputs.pop('''target_size''' )
__lowercase = model_inputs.pop('''candidate_label''' )
__lowercase = model_inputs.pop('''is_last''' )
__lowercase = self.model(**lowercase__ )
__lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ):
__lowercase = []
for model_output in model_outputs:
__lowercase = model_output['''candidate_label''']
__lowercase = BaseModelOutput(lowercase__ )
__lowercase = self.image_processor.post_process_object_detection(
outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__lowercase = outputs['''scores'''][index].item()
__lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] )
__lowercase = {'''score''': score, '''label''': label, '''box''': box}
results.append(lowercase__ )
        __lowercase = sorted(lowercase__ ,key=lambda x : x["score"] ,reverse=lowercase__ )
if top_k:
__lowercase = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
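For orientation, a hedged usage sketch of this pipeline through the `pipeline()` factory. The checkpoint name and image URL below are illustrative assumptions; the result schema mirrors the dictionaries built in the postprocess step above.

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction is a dict of the form built in postprocess above:
# {"score": <float>, "label": <str>, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}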
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
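The integration tests above double as a usage recipe; a compact, hedged version (network access to the `sijunhe/nezha-cn-base` checkpoint is assumed):

import torch
from transformers import NezhaForMaskedLM

model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    logits = model(input_ids, attention_mask=attention_mask).logits
assert logits.shape == torch.Size((1, 6, 21128))  # vocab size, per the test above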
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
__lowercase , __lowercase = get_aligned_output_features_output_indices(lowercase__ ,lowercase__ ,lowercase__ )
self.assertEqual(lowercase__ ,['''c'''] )
self.assertEqual(lowercase__ ,[2] )
# Out indices set to match out features
__lowercase , __lowercase = get_aligned_output_features_output_indices(['''a''', '''c'''] ,lowercase__ ,lowercase__ )
self.assertEqual(lowercase__ ,['''a''', '''c'''] )
self.assertEqual(lowercase__ ,[0, 2] )
# Out features set to match out indices
__lowercase , __lowercase = get_aligned_output_features_output_indices(lowercase__ ,[0, 2] ,lowercase__ )
self.assertEqual(lowercase__ ,['''a''', '''c'''] )
self.assertEqual(lowercase__ ,[0, 2] )
# Out features selected from negative indices
__lowercase , __lowercase = get_aligned_output_features_output_indices(lowercase__ ,[-3, -1] ,lowercase__ )
self.assertEqual(lowercase__ ,['''a''', '''c'''] )
self.assertEqual(lowercase__ ,[-3, -1] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# Stage names must be set
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['''a''', '''b'''] ,(0, 1) ,lowercase__ )
# Out features must be a list
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(('''a''', '''b''') ,(0, 1) ,['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['''a''', '''b'''] ,(0, 1) ,['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(lowercase__ ,0 ,['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(lowercase__ ,(0, 1) ,['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['''a''', '''b'''] ,(0,) ,['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['''a''', '''b'''] ,(0, 2) ,['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(lowercase__ ):
verify_out_features_out_indices(['''b''', '''a'''] ,(0, 1) ,['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] ,(0, 1, -1) ,['''a''', '''b''', '''c''', '''d'''] )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = BackboneMixin()
__lowercase = ['''a''', '''b''', '''c''']
__lowercase = ['''a''', '''c''']
__lowercase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features ,['''a''', '''c'''] )
self.assertEqual(backbone.out_indices ,[0, 2] )
# Check out features and indices are updated correctly
__lowercase = ['''a''', '''b''']
self.assertEqual(backbone.out_features ,['''a''', '''b'''] )
self.assertEqual(backbone.out_indices ,[0, 1] )
__lowercase = [-3, -1]
self.assertEqual(backbone.out_features ,['''a''', '''c'''] )
self.assertEqual(backbone.out_indices ,[-3, -1] )
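The alignment rule these tests pin down can be summarised in a standalone sketch (hedged: this reimplements the documented behaviour for illustration, it is not the library source):

def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]  # default to the last stage only
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices

assert align(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align(["a", "c"], None, ["a", "b", "c"]) == (["a", "c"], [0, 2])
assert align(None, [-3, -1], ["a", "b", "c"]) == (["a", "c"], [-3, -1])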
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
        return len(self ) >= int(len(self._buckets ) * self._capacity_factor )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
        return len(self ) < len(self._buckets ) * self._capacity_factor / 2
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
'''simple docstring'''
from collections.abc import Sequence
def _A ( A__ = None ):
"""simple docstring"""
if nums is None or not nums:
raise ValueError('''Input sequence should not be empty''' )
__lowercase = nums[0]
for i in range(1 , len(A__ ) ):
__lowercase = nums[i]
        __lowercase = max(ans , ans + num , num )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCAmelCase__ = int(input('''Enter number of elements : ''').strip())
lowerCAmelCase__ = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
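Because the loop body above is hard to read in this rendering, here is a reference sketch of the recurrence with explicit names. Note that it computes the best non-empty subsequence (elements need not be contiguous): at each step, `max(ans, ans + num, num)` keeps the best answer so far, extends it with the current element, or restarts from the current element.

def max_subsequence_sum_ref(nums):
    ans = nums[0]
    for num in nums[1:]:
        ans = max(ans, ans + num, num)
    return ans

assert max_subsequence_sum_ref([1, 2, 3, 4, -2]) == 10  # take 1 + 2 + 3 + 4
assert max_subsequence_sum_ref([-5, -1, -3]) == -1      # all negative: best single element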
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
lowerCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=lowerCamelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=lowerCamelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
    SCREAMING_SNAKE_CASE : bool = field(default=lowerCamelCase__ , metadata={'help': 'Whether to freeze the encoder.'} )
SCREAMING_SNAKE_CASE : bool = field(default=lowerCamelCase__ , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class lowercase_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=1_0_2_4 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=1_2_8 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=1_4_2 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(
default=1_4_2 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
SCREAMING_SNAKE_CASE : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'Source language id for translation.'} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'Target language id for translation.'} )
SCREAMING_SNAKE_CASE : Optional[int] = field(default=lowerCamelCase__ , metadata={'help': '# num_beams to use for evaluation.'} )
SCREAMING_SNAKE_CASE : bool = field(
default=lowerCamelCase__ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
logger.info(F"***** {split} metrics *****" )
for key in sorted(metrics.keys() ):
logger.info(F" {key} = {metrics[key]}" )
save_json(A__ , os.path.join(A__ , F"{split}_results.json" ) )
def _A ( ):
"""simple docstring"""
__lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__lowercase , __lowercase , __lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__lowercase , __lowercase , __lowercase = parser.parse_args_into_dataclasses()
check_output_dir(A__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fp16 , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , A__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__lowercase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(A__ , A__ , A__ ):
assert hasattr(A__ , A__ ), F"({config.__class__.__name__}) doesn't have a `{p}` attribute"
setattr(A__ , A__ , getattr(A__ , A__ ) )
__lowercase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=A__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(A__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
__lowercase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(A__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(A__ , A__ ):
__lowercase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
__lowercase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(A__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
__lowercase = SeqaSeqDataset
# Get datasets
__lowercase = (
dataset_class(
A__ , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
__lowercase = (
dataset_class(
A__ , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
__lowercase = (
dataset_class(
A__ , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
__lowercase = (
build_compute_metrics_fn(data_args.task , A__ ) if training_args.predict_with_generate else None
)
__lowercase = SeqaSeqTrainer(
model=A__ , args=A__ , data_args=A__ , train_dataset=A__ , eval_dataset=A__ , data_collator=SeqaSeqDataCollator(
A__ , A__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=A__ , tokenizer=A__ , )
__lowercase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
__lowercase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
__lowercase = train_result.metrics
__lowercase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , A__ , training_args.output_dir )
all_metrics.update(A__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__lowercase = trainer.evaluate(metric_key_prefix='''val''' )
__lowercase = data_args.n_val
__lowercase = round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
__lowercase = trainer.predict(test_dataset=A__ , metric_key_prefix='''test''' )
__lowercase = test_output.metrics
__lowercase = data_args.n_test
if trainer.is_world_process_zero():
__lowercase = round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , A__ , training_args.output_dir )
all_metrics.update(A__ )
if training_args.predict_with_generate:
__lowercase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
__lowercase = lmap(str.strip , A__ )
write_txt_file(A__ , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(A__ , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def _A ( A__ ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
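The entry point above hinges on `HfArgumentParser` mapping command-line flags onto the three dataclasses. A toy sketch of that pattern (the dataclass and flags here are illustrative, not taken from this script):

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    data_dir: str = field(metadata={"help": "Directory holding the data files."})
    n_train: int = field(default=-1, metadata={"help": "-1 means use all examples."})

parser = HfArgumentParser(ToyArguments)
(toy_args,) = parser.parse_args_into_dataclasses(["--data_dir", "/tmp/data"])
assert toy_args.data_dir == "/tmp/data" and toy_args.n_train == -1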
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli'
SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
SCREAMING_SNAKE_CASE : Any = 'text_classifier'
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']]
SCREAMING_SNAKE_CASE : List[str] = ['text']
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setup()
__lowercase = self.model.config
__lowercase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__lowercase = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ):
__lowercase = labels
return self.pre_processor(
[text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = outputs.logits
__lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
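A hedged usage sketch: `load_tool` is the documented entry point for agent tools, though resolving this tool under the task name "text-classification" is an assumption, and the `facebook/bart-large-mnli` weights are downloaded on first use.

from transformers import load_tool

classifier = load_tool("text-classification")  # task name is an assumption
label = classifier(
    "The new checkout flow is much faster.",
    labels=["positive", "negative"],
)
print(label)  # one of the provided labels, e.g. "positive"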
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCAmelCase__ = 5_0000
lowerCAmelCase__ = 5000
lowerCAmelCase__ , lowerCAmelCase__ = os.path.split(__file__)
lowerCAmelCase__ = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def _A ( A__ , A__ ):
"""simple docstring"""
for i in range(A__ ):
__lowercase = dataset[i]
@get_duration
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
for i in range(0 , len(A__ ) , A__ ):
__lowercase = dataset[i : i + batch_size]
@get_duration
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(A__ ):
__lowercase = dataset[i]
@get_duration
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(0 , A__ , A__ ):
__lowercase = dataset[i : i + batch_size]
def _A ( ):
"""simple docstring"""
__lowercase = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__lowercase = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
__lowercase = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__lowercase = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__lowercase = generate_example_dataset(
os.path.join(A__ , '''dataset.arrow''' ) , A__ , num_examples=A__ , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(A__ ) )
__lowercase = func(A__ , **A__ )
print('''shuffling dataset''' )
__lowercase = dataset.shuffle()
print('''Second set of iterations (after shuffling''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(A__ ) )
__lowercase = func(
A__ , **A__ )
with open(A__ , '''wb''' ) as f:
f.write(json.dumps(A__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
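`get_duration` is imported from the benchmark's local `utils` module, which is not shown here. A plausible sketch of the shape such a timing decorator typically has (an assumption, not the actual helper):

import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds, written into the results JSON
    return wrapper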
'''simple docstring'''
from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ):
# Stores actual heap items.
__lowercase = []
# Stores indexes of each item for supporting updates and deletion.
__lowercase = {}
# Stores current size of heap.
__lowercase = 0
        # Stores the function used to evaluate the score of an item; ordering
        # is based on this score.
        __lowercase = key or (lambda x : x)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
return int((i - 1) / 2 ) if i > 0 else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ):
__lowercase , __lowercase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__lowercase , __lowercase = self.arr[j], self.arr[i]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
return self.arr[i][1] < self.arr[j][1]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._left(lowercase__ )
__lowercase = self._right(lowercase__ )
__lowercase = i
if left is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = left
if right is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = right
return valid_parent
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._parent(lowercase__ )
while parent is not None and not self._cmp(lowercase__ ,lowercase__ ):
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = parent, self._parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = self._get_valid_parent(lowercase__ )
while valid_parent != index:
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
__lowercase = [item, self.key(lowercase__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
del self.pos_map[item]
__lowercase = self.arr[self.size - 1]
__lowercase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase__ )] )
else:
__lowercase = [item, self.key(lowercase__ )]
__lowercase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.arr[0] if self.size else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
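A usage sketch for the indexed heap above, under two stated assumptions: the class is importable (here as `Heap`), and its methods carry the names the implementation calls internally (`insert_item`, `update_item`, `delete_item`, `get_top`, `extract_top`) rather than the placeholders shown in this rendering.

heap = Heap()                        # min-heap with the identity key
for value in (5, 1, 4, 2):
    heap.insert_item(value, value)   # stored as [item, key(item_value)] pairs
assert heap.get_top() == [1, 1]      # the smallest key sits at the root
assert heap.extract_top() == [1, 1]  # pops and returns the root pair
assert heap.get_top() == [2, 2]
heap.update_item(5, -10)             # re-keys an existing item and re-heapifies
assert heap.get_top() == [5, -10]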
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )
if "model" in sd.keys():
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
# pop unnecessary weights
__lowercase = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(A__ )
__lowercase = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
__lowercase = sd.pop(A__ )
__lowercase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
__lowercase = sd[key]
# We split QKV in separate Q,K,V
__lowercase = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
__lowercase = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
__lowercase = value.shape[0]
assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` has its QKV weight separated as K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
__lowercase , __lowercase , __lowercase = torch.split(A__ , depth // 3 , dim=0 )
__lowercase = q
__lowercase = k
__lowercase = v
del sd[key]
return sd
@torch.no_grad()
def _A ( A__ , A__ , A__=None ):
"""simple docstring"""
__lowercase = load_checkpoint(A__ )
if config is not None:
__lowercase = OPTConfig.from_pretrained(A__ )
else:
__lowercase = OPTConfig()
__lowercase = OPTModel(A__ ).half().eval()
model.load_state_dict(A__ )
# Check results
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
lowerCAmelCase__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
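A programmatic sketch mirroring the CLI wiring directly above (the paths are placeholders, and seeding the config from `facebook/opt-350m` is an assumption):

convert_opt_checkpoint(
    "/path/to/restored.pt",      # consolidated metaseq checkpoint
    "./opt-hf",                  # output folder for the converted model
    config="facebook/opt-350m",  # optional: initialise OPTConfig from a hub repo
)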
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the logging level. We make sure
# it's set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : cb.__name__ if isinstance(lowercase__ ,lowercase__ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they are not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=UserWarning )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
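# Usage sketch outside the test suite (Trainer construction assumed, mirroring
# get_trainer() above): callbacks can be passed at init or attached later.
#   trainer = Trainer(model, args, callbacks=[MyTestTrainerCallback()])
#   trainer.add_callback(PrinterCallback)  # classes and instances both work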
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
lowerCAmelCase__ = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
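# With the lazy module above, an import such as `from transformers.onnx import
# OnnxConfig` only resolves the submodule on first attribute access instead of
# importing every ONNX helper eagerly.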
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
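# Usage sketch (illustrative; in diffusers the class above corresponds to
# FlaxControlNetModel):
#   controlnet = FlaxControlNetModel(sample_size=3_2)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
#   out = controlnet.apply({'params': params}, sample, timesteps,
#                          encoder_hidden_states, controlnet_cond)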
| 41 | 1 |
'''simple docstring'''
lowerCAmelCase__ = 8.3_144_598
def rms_speed_of_molecule ( temperature , molar_mass ):
"""simple docstring"""
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
lowerCAmelCase__ = 300
lowerCAmelCase__ = 28
lowerCAmelCase__ = rms_speed_of_molecule(temperature, molar_mass)
print(f'Vrms of Nitrogen gas at 300 K is {vrms} m/s')
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
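def _unfold_patchify_demo():
    # Illustrative sketch (hypothetical helper, not part of the processor):
    # the shapes produced by the unfold-based patchification above for a
    # (1, 3, 32, 32) image split into 16x16 patches.
    x = torch.rand(1 , 3 , 3_2 , 3_2 )
    u = torch.nn.functional.unfold(x , (1_6, 1_6) , stride=(1_6, 1_6) )
    assert u.shape == (1, 3 * 1_6 * 1_6, 4)  # (batch, C*ph*pw, num_patches)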
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale such that the resized image contains at most max_patches patches
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
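    # Worked example of the scaling above: for a 512x512 image, 16x16 patches
    # and max_patches=2048, scale = sqrt(2048 * (16/512) * (16/512)) = sqrt(2),
    # so num_feasible_rows = num_feasible_cols = floor(sqrt(2) * 512 / 16) = 45;
    # the image is resized to 720x720, yielding 45*45 = 2025 patches that are
    # then padded up to 2048.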
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are always returned as flattened patches.''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# extract the flattened patches for each image (converted to torch and back internally)
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
| 41 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'yolos'
def __init__( self : Union[str, Any] ,lowercase__ : Any=7_6_8 ,lowercase__ : Optional[Any]=1_2 ,lowercase__ : Optional[Any]=1_2 ,lowercase__ : List[Any]=3_0_7_2 ,lowercase__ : List[Any]="gelu" ,lowercase__ : Optional[int]=0.0 ,lowercase__ : Tuple=0.0 ,lowercase__ : str=0.0_2 ,lowercase__ : Any=1e-1_2 ,lowercase__ : Optional[Any]=[5_1_2, 8_6_4] ,lowercase__ : Union[str, Any]=1_6 ,lowercase__ : Optional[int]=3 ,lowercase__ : Any=True ,lowercase__ : str=1_0_0 ,lowercase__ : int=True ,lowercase__ : str=False ,lowercase__ : List[Any]=1 ,lowercase__ : List[Any]=5 ,lowercase__ : Dict=2 ,lowercase__ : int=5 ,lowercase__ : int=2 ,lowercase__ : Optional[Any]=0.1 ,**lowercase__ : int ,):
super().__init__(**lowercase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return 1_2
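# Sketch (assumed CLI): the OnnxConfig above drives ONNX export via
# `python -m transformers.onnx --model=hustvl/yolos-small onnx/`.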
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
__lowercase = [2, 1, 2, -1]
__lowercase = [1, 2, 3, 4]
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = len(self.first_signal )
__lowercase = len(self.second_signal )
__lowercase = max(lowercase__ ,lowercase__ )
# create a zero matrix of max_length x max_length
__lowercase = [[0] * max_length for i in range(lowercase__ )]
# fills the smaller signal with zeros to make both signals of the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(lowercase__ ):
__lowercase = deque(self.second_signal )
rotated_signal.rotate(lowercase__ )
for j, item in enumerate(lowercase__ ):
matrix[i][j] += item
# multiply the matrix with the first signal
__lowercase = np.matmul(np.transpose(lowercase__ ) ,np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(lowercase__ ,2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
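    # Equivalence check (illustrative): circular convolution can also be
    # computed with the FFT; both approaches give [10.0, 10.0, 6.0, 14.0]
    # for the signals above.
    a = np.array([2, 1, 2, -1] )
    b = np.array([1, 2, 3, 4] )
    print([round(x ,2 ) for x in np.fft.ifft(np.fft.fft(a ) * np.fft.fft(b ) ).real] )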
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
lowerCAmelCase__ = 1.6_021e-19 # units = C
def _A ( conductivity , electron_conc , mobility , ):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''Exactly one of conductivity, electron_conc or mobility must be 0; it is the unknown to solve for''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
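    # Worked example (illustrative): solving for conductivity with
    # electron_conc = 1e18 /m^3 and mobility = 0.01 m^2/(V*s) gives
    # ('conductivity', ELECTRON_CHARGE * 1e18 * 0.01) ~ ('conductivity', 1.6e-3) S/m.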
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
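# The try/except above falls back to dummy objects when torch or transformers
# is unavailable, so importing this package never hard-fails on optional
# backends.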
| 41 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (PNDMScheduler,)
SCREAMING_SNAKE_CASE : str = (('num_inference_steps', 5_0),)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,**lowercase__ : List[Any] ):
__lowercase = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
}
config.update(**lowercase__ )
return config
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str]=0 ,**lowercase__ : Tuple ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : int ):
pass
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Optional[Any]=0 ,**lowercase__ : Any ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowercase__ )
__lowercase = scheduler_class.from_pretrained(lowercase__ )
new_scheduler.set_timesteps(lowercase__ )
# copy over dummy past residuals (must be done after setting timesteps)
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = new_scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ,**lowercase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def SCREAMING_SNAKE_CASE ( self : Any ,**lowercase__ : Dict ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowercase__ )
__lowercase = scheduler_class(**lowercase__ )
__lowercase = 1_0
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowercase__ )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowercase = model(lowercase__ ,lowercase__ )
__lowercase = scheduler.step_plms(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop('''num_inference_steps''' ,lowercase__ )
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(lowercase__ ,'''set_timesteps''' ):
scheduler.set_timesteps(lowercase__ )
elif num_inference_steps is not None and not hasattr(lowercase__ ,'''set_timesteps''' ):
__lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__lowercase = dummy_past_residuals[:]
__lowercase = scheduler.step_prk(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_prk(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
__lowercase = scheduler.step_plms(lowercase__ ,0 ,lowercase__ ,**lowercase__ ).prev_sample
__lowercase = scheduler.step_plms(lowercase__ ,1 ,lowercase__ ,**lowercase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase__ )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(steps_offset=1 )
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps ,torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) ,)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] ,[0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowercase__ ,beta_end=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
# an earlier version of set_timesteps() caused an error when indexing alphas with a number of inference steps that is a power of 3
__lowercase = 2_7
for scheduler_class in self.scheduler_classes:
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.set_timesteps(lowercase__ )
# before the power-of-3 fix this would error on the first step, so we only need to run two steps
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowercase = scheduler.step_prk(lowercase__ ,lowercase__ ,lowercase__ ).prev_sample
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
with self.assertRaises(lowercase__ ):
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowercase__ )
scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample ).prev_sample
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.full_loop()
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.full_loop(prediction_type='''v_prediction''' )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : int ):
# We specify different beta values so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# We specify different beta values so that the first alpha is 0.99
__lowercase = self.full_loop(set_alpha_to_one=lowercase__ ,beta_start=0.0_1 )
__lowercase = torch.sum(torch.abs(lowercase__ ) )
__lowercase = torch.mean(torch.abs(lowercase__ ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
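if __name__ == "__main__":
    # Minimal usage sketch (not part of the test suite; API assumed from the
    # tests above): drive the scheduler with a dummy residual standing in for
    # a real model prediction.
    scheduler = PNDMScheduler(num_train_timesteps=1_0_0_0 )
    scheduler.set_timesteps(1_0 )
    sample = torch.ones(1 , 3 , 8 , 8 )
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # stand-in for model(sample, t)
        sample = scheduler.step(residual ,t ,sample ).prev_sample
    print(sample.abs().mean() )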
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the beginning of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = UnCLIPImageVariationPipeline
SCREAMING_SNAKE_CASE : str = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
SCREAMING_SNAKE_CASE : List[str] = IMAGE_VARIATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE : List[str] = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
SCREAMING_SNAKE_CASE : int = False
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 3_2
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return 1_0_0
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,)
return CLIPTextModelWithProjection(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Any ):
torch.manual_seed(0 )
__lowercase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,num_hidden_layers=5 ,num_attention_heads=4 ,image_size=3_2 ,intermediate_size=3_7 ,patch_size=1 ,)
return CLIPVisionModelWithProjection(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
torch.manual_seed(0 )
__lowercase = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
__lowercase = UnCLIPTextProjModel(**lowercase__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Dict ):
torch.manual_seed(0 )
__lowercase = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
__lowercase = UNetaDConditionModel(**lowercase__ )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
torch.manual_seed(0 )
__lowercase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def SCREAMING_SNAKE_CASE ( self : Tuple ):
# seeded differently to get a different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__lowercase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.dummy_decoder
__lowercase = self.dummy_text_proj
__lowercase = self.dummy_text_encoder
__lowercase = self.dummy_tokenizer
__lowercase = self.dummy_super_res_first
__lowercase = self.dummy_super_res_last
__lowercase = UnCLIPScheduler(
variance_type='''learned_range''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
__lowercase = UnCLIPScheduler(
variance_type='''fixed_small_log''' ,prediction_type='''epsilon''' ,num_train_timesteps=1_0_0_0 ,)
__lowercase = CLIPImageProcessor(crop_size=3_2 ,size=3_2 )
__lowercase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : List[Any]=0 ,lowercase__ : List[Any]=True ):
__lowercase = floats_tensor((1, 3, 3_2, 3_2) ,rng=random.Random(lowercase__ ) ).to(lowercase__ )
if str(lowercase__ ).startswith('''mps''' ):
__lowercase = torch.manual_seed(lowercase__ )
else:
__lowercase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
if pil_image:
__lowercase = input_image * 0.5 + 0.5
__lowercase = input_image.clamp(0 ,1 )
__lowercase = input_image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
__lowercase = DiffusionPipeline.numpy_to_pil(lowercase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
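    # Note on the tensor -> PIL path in the helper above: float tensors in
    # [-1, 1] are rescaled to [0, 1], clamped, moved to (B, H, W, C) numpy
    # arrays and converted with DiffusionPipeline.numpy_to_pil.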
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(**lowercase__ )
__lowercase = output.images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(
**lowercase__ ,return_dict=lowercase__ ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''cpu'''
__lowercase = self.get_dummy_components()
__lowercase = self.pipeline_class(**lowercase__ )
__lowercase = pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(**lowercase__ )
__lowercase = output.images
__lowercase = self.get_dummy_inputs(lowercase__ ,pil_image=lowercase__ )
__lowercase = pipe(
**lowercase__ ,return_dict=lowercase__ ,)[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_image_variation_input_list_images( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        pipeline_inputs = self.get_dummy_inputs(device ,pil_image=True )
        pipeline_inputs['''image'''] = [
            pipeline_inputs['''image'''],
            pipeline_inputs['''image'''],
        ]
        output = pipe(**pipeline_inputs )
        image = output.images
        tuple_pipeline_inputs = self.get_dummy_inputs(device ,pil_image=True )
        tuple_pipeline_inputs['''image'''] = [
            tuple_pipeline_inputs['''image'''],
            tuple_pipeline_inputs['''image'''],
        ]
        image_from_tuple = pipe(
            **tuple_pipeline_inputs ,return_dict=False ,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (2, 6_4, 6_4, 3)
        expected_slice = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    def test_unclip_passed_image_embed( self ):
        device = torch.device('''cpu''' )
        class DummyScheduler:
            """simple docstring"""
            init_noise_sigma = 1
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=device ).manual_seed(0 )
        dtype = pipe.decoder.dtype
        batch_size = 1
        shape = (
            batch_size,
            pipe.decoder.config.in_channels,
            pipe.decoder.config.sample_size,
            pipe.decoder.config.sample_size,
        )
        decoder_latents = pipe.prepare_latents(
            shape ,dtype=dtype ,device=device ,generator=generator ,latents=None ,scheduler=DummyScheduler() )
        shape = (
            batch_size,
            pipe.super_res_first.config.in_channels // 2,
            pipe.super_res_first.config.sample_size,
            pipe.super_res_first.config.sample_size,
        )
        super_res_latents = pipe.prepare_latents(
            shape ,dtype=dtype ,device=device ,generator=generator ,latents=None ,scheduler=DummyScheduler() )
        pipeline_inputs = self.get_dummy_inputs(device ,pil_image=False )
        img_out_a = pipe(
            **pipeline_inputs ,decoder_latents=decoder_latents ,super_res_latents=super_res_latents ).images
        pipeline_inputs = self.get_dummy_inputs(device ,pil_image=False )
        # Don't pass image, instead pass embedding
        image = pipeline_inputs.pop('''image''' )
        image_embeddings = pipe.image_encoder(image ).image_embeds
        img_out_b = pipe(
            **pipeline_inputs ,decoder_latents=decoder_latents ,super_res_latents=super_res_latents ,image_embeddings=image_embeddings ,).images
        # make sure passing image embeddings manually gives an identical result
        assert np.abs(img_out_a - img_out_b ).max() < 1e-4
@skip_mps
    def test_attention_slicing_forward_pass( self ):
        test_max_difference = torch_device == '''cpu'''
        # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
        expected_max_diff = 1e-2
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference ,expected_max_diff=expected_max_diff )
@skip_mps
    def test_inference_batch_single_identical( self ):
        test_max_difference = torch_device == '''cpu'''
        relax_max_difference = True
        additional_params_copy_to_batched_inputs = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference ,relax_max_difference=relax_max_difference ,additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ,)
    def test_inference_batch_consistent( self ):
        additional_params_copy_to_batched_inputs = [
            '''decoder_num_inference_steps''',
            '''super_res_num_inference_steps''',
        ]
        if torch_device == "mps":
            # TODO: MPS errors with larger batch sizes
            batch_sizes = [2, 3]
            self._test_inference_batch_consistent(
                batch_sizes=batch_sizes ,additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ,)
        else:
            self._test_inference_batch_consistent(
                additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs )
@skip_mps
    def test_dict_tuple_outputs_equivalent( self ):
        return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_local( self ):
        return super().test_save_load_local()
@skip_mps
    def test_save_load_optional_components( self ):
        return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo( self ):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            '''kakaobrain/karlo-v1-alpha-image-variations''' ,torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        output = pipeline(
            input_image ,generator=generator ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (2_5_6, 2_5_6, 3)
        assert_mean_pixel_difference(image ,expected_image ,1_5 )
| 41 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance, src):
    """simple docstring"""
    print(F"Vertex\tShortest Distance from vertex {src}" )
    for i, d in enumerate(distance ):
        print(F"{i}\t\t{d}" )
def check_negative_cycle(graph, distance, edge_count):
    """simple docstring"""
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph, vertex_count, edge_count, src):
    """simple docstring"""
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )
    return distance
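# Hedged usage sketch (not part of the original file): a tiny hard-coded graph in
# the same edge-dict format, checking the distances Bellman-Ford reports from
# vertex 0. The edges and weights below are illustrative only.
def _demo_bellman_ford():
    example_graph = [
        {"src": 0, "dst": 1, "weight": 4},
        {"src": 0, "dst": 2, "weight": 1},
        {"src": 2, "dst": 1, "weight": 2},
        {"src": 1, "dst": 3, "weight": 1},
    ]
    # the path 0 -> 2 -> 1 -> 3 (cost 4) beats the direct 0 -> 1 edge (cost 4 to vertex 1 alone)
    assert bellman_ford(example_graph, 4, 4, 0) == [0.0, 3.0, 1.0, 4.0]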
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, source)
| 41 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True , slots=True )
class _Item (Generic[KEY, VAL] ):
    """simple docstring"""
    key: KEY
    val: VAL
class _DeletedItem (_Item ):
    """simple docstring"""
    def __init__( self ):
        super().__init__(None ,None )
    def __bool__( self ):
        return False
_deleted = _DeletedItem()
class HashMap (MutableMapping[KEY, VAL] ):
    """simple docstring"""
    def __init__( self ,initial_block_size : int = 8 ,capacity_factor : float = 0.7_5 ):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self ,key : KEY ):
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self ,ind : int ):
        return (ind + 1) % len(self._buckets )
    def _try_set( self ,ind : int ,key : KEY ,val : VAL ):
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key ,val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key ,val )
            return True
        else:
            return False
    def _is_full( self ):
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self ):
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self ,new_size : int ):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key ,item.val )
    def _size_up( self ):
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self ):
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self ,key : KEY ):
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self ,key : KEY ,val : VAL ):
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind ,key ,val ):
                break
    def __setitem__( self ,key : KEY ,val : VAL ):
        if self._is_full():
            self._size_up()
        self._add_item(key ,val )
    def __delitem__( self ,key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self ,key : KEY ):
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self ):
        return self._len
    def __iter__( self ):
        yield from (item.key for item in self._buckets if item)
    def __repr__( self ):
        val_string = ''', '''.join(
            F"{item.key}: {item.val}" for item in self._buckets if item )
        return F"HashMap({val_string})"
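# Hedged usage sketch (not part of the original file): exercises insert, update,
# lookup, delete, and the automatic resize path of the map above.
def _demo_hash_map():
    hm = HashMap(initial_block_size=4)
    for i in range(10):  # pushes past the 0.75 load factor, forcing _size_up()
        hm[F"key{i}"] = i
    assert len(hm) == 10 and hm["key3"] == 3
    hm["key3"] = 33  # updating an existing key keeps the length constant
    assert hm["key3"] == 33 and len(hm) == 10
    del hm["key3"]  # the slot becomes a _deleted tombstone so later probes still work
    assert len(hm) == 9 and "key3" not in hm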
| 41 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class YolosFeatureExtractor (YolosImageProcessor ):
    """simple docstring"""
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 41 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa :
    """simple docstring"""
    def __init__( self ,data : bytes ):
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a_09e_667,
0xbb_67a_e85,
0x3c_6ef_372,
0xa5_4ff_53a,
0x51_0e5_27f,
0x9b_056_88c,
0x1f_83d_9ab,
0x5b_e0c_d19,
]
# Initialize round constants
        self.round_constants = [
0x42_8a2_f98,
0x71_374_491,
0xb5_c0f_bcf,
0xe9_b5d_ba5,
0x39_56c_25b,
0x59_f11_1f1,
0x92_3f8_2a4,
0xab_1c5_ed5,
0xd8_07a_a98,
0x12_835_b01,
0x24_318_5be,
0x55_0c7_dc3,
0x72_be5_d74,
0x80_deb_1fe,
0x9b_dc0_6a7,
0xc1_9bf_174,
0xe4_9b6_9c1,
0xef_be4_786,
0x0f_c19_dc6,
0x24_0ca_1cc,
0x2d_e92_c6f,
0x4a_748_4aa,
0x5c_b0a_9dc,
0x76_f98_8da,
0x98_3e5_152,
0xa8_31c_66d,
0xb0_032_7c8,
0xbf_597_fc7,
0xc6_e00_bf3,
0xd5_a79_147,
0x06_ca6_351,
0x14_292_967,
0x27_b70_a85,
0x2e_1b2_138,
0x4d_2c6_dfc,
0x53_380_d13,
0x65_0a7_354,
0x76_6a0_abb,
0x81_c2c_92e,
0x92_722_c85,
0xa2_bfe_8a1,
0xa8_1a6_64b,
0xc2_4b8_b70,
0xc7_6c5_1a3,
0xd1_92e_819,
0xd6_990_624,
0xf4_0e3_585,
0x10_6aa_070,
0x19_a4c_116,
0x1e_376_c08,
0x27_487_74c,
0x34_b0b_cb5,
0x39_1c0_cb3,
0x4e_d8a_a4a,
0x5b_9cc_a4f,
0x68_2e6_ff3,
0x74_8f8_2ee,
0x78_a56_36f,
0x84_c87_814,
0x8c_c70_208,
0x90_bef_ffa,
0xa4_506_ceb,
0xbe_f9a_3f7,
0xc6_717_8f2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data : bytes ):
        padding = b'''\x80''' + (b'''\x00''' * (6_3 - (len(data ) + 8) % 6_4))
        big_endian_integer = struct.pack('''>Q''' ,(len(data ) * 8) )
        return data + padding + big_endian_integer
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 6_4]
            for x in range(0 ,len(self.preprocessed_data ) ,6_4 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('''>16L''' ,block ) )
            # add 48 0-ed integers
            words += [0] * 4_8
            a , b , c , d , e , f , g , h = self.hashes
for index in range(0 ,6_4 ):
if index > 1_5:
# modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 1_5] ,7 )
                        ^ self.ror(words[index - 1_5] ,1_8 )
                        ^ (words[index - 1_5] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] ,1_7 )
                        ^ self.ror(words[index - 2] ,1_9 )
                        ^ (words[index - 2] >> 1_0)
                    )
                    words[index] = (
                        words[index - 1_6] + sa + words[index - 7] + sb
                    ) % 0x100_000_000
# Compression
                sa = self.ror(e ,6 ) ^ self.ror(e ,1_1 ) ^ self.ror(e ,2_5 )  # S1
                ch = (e & f) ^ ((~e & 0xff_fff_fff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x100_000_000
                sb = self.ror(a ,2 ) ^ self.ror(a ,1_3 ) ^ self.ror(a ,2_2 )  # S0
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x100_000_000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x100_000_000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x100_000_000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
# Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100_000_000)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = ''''''.join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value : int ,rotations : int ):
        return 0xff_fff_fff & (value << (3_2 - rotations)) | (value >> rotations)
class SHAaaaTest (unittest.TestCase ):
    """simple docstring"""
    def test_match_hashes( self ):
        import hashlib
        data = bytes('''Test String''' ,'''utf-8''' )
        self.assertEqual(SHAaaa(data ).hash ,hashlib.sha256(data ).hexdigest() )
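# Hedged sketch (not part of the original file): a doctest-style check against
# hashlib, plus the avalanche effect: a one-byte change to the input flips
# roughly half of the 256 digest bits.
# >>> import hashlib
# >>> SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()
# True
# >>> bin(int(SHAaaa(b"abc").hash, 16) ^ int(SHAaaa(b"abd").hash, 16)).count("1")  # ~128 expected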
def main():
    """simple docstring"""
    import doctest
    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-s''' , '''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
    parser.add_argument(
        '''-f''' , '''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , '''rb''' ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , '''utf-8''' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
| 41 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock( tmpdir ):
    """simple docstring"""
    lock_a = FileLock(str(tmpdir / '''foo.lock''' ) )
    lock_b = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.0_1
    with lock_a.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lock_b.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path( tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1000 + '''.lock'''
    lock_a = FileLock(str(tmpdir / filename ) )
    assert lock_a._lock_file.endswith('''.lock''' )
    assert not lock_a._lock_file.endswith(filename )
    assert len(os.path.basename(lock_a._lock_file ) ) <= 255
    lock_b = FileLock(tmpdir / filename )
    with lock_b.acquire():
        with pytest.raises(Timeout ):
            lock_a.acquire(0 )
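# Hedged usage sketch (not part of the original tests): the basic pattern the
# tests above exercise. `is_locked` is assumed from the py-filelock API that
# `datasets` vendors; the lock is released when the context exits.
def _demo_filelock(tmpdir):
    lock = FileLock(str(tmpdir / "demo.lock"))
    with lock.acquire(timeout=1):
        pass  # critical section: the lock file is held here
    assert not lock.is_locked  # released on context exit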
| 41 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
    url = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
    res = requests.get(url, headers={'''UserAgent''': UserAgent().random})
    # res.raise_for_status()
    with open('''project1a.html''', '''wb''') as out_file:  # only for knowing the class
        for data in res.iter_content(1_0000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, '''html.parser''')
    links = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 41 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
        '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTBigCodeForSequenceClassification''',
        '''GPTBigCodeForTokenClassification''',
        '''GPTBigCodeForCausalLM''',
        '''GPTBigCodeModel''',
        '''GPTBigCodePreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
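# Hedged sketch (not part of the original file) of what the lazy module buys:
# `import transformers` stays cheap because the torch-backed submodule is only
# imported when one of its attributes is first accessed.
# >>> from transformers import GPTBigCodeConfig   # resolved lazily via _LazyModule
# >>> config = GPTBigCodeConfig()                 # the submodule import happens here at the latest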
| 41 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = KandinskyVaaImgaImgPipeline
SCREAMING_SNAKE_CASE : List[str] = ['image_embeds', 'negative_image_embeds', 'image']
SCREAMING_SNAKE_CASE : List[Any] = [
'image_embeds',
'negative_image_embeds',
'image',
]
SCREAMING_SNAKE_CASE : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
SCREAMING_SNAKE_CASE : int = False
@property
    def text_embedder_hidden_size( self ):
        return 3_2
@property
    def time_input_dim( self ):
        return 3_2
@property
    def block_out_channels_a( self ):
        return self.time_input_dim
@property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
        return 1_0_0
@property
    def dummy_unet( self ):
torch.manual_seed(0 )
        config = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**config )
        return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0_0_8_5,
            '''beta_end''': 0.0_1_2,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 ,2 ,3 ,1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 6_4,
            '''width''': 6_4,
            '''num_inference_steps''': 1_0,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_img2img( self ):
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) ,return_dict=False ,)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.6_1_9_9_7_7_8, 0.6_3_9_8_4_4_0_6, 0.4_6_1_4_5_7_8_5, 0.6_2_9_4_4_9_8_4, 0.5_6_2_2_2_1_5, 0.4_7_3_0_6_1_3_2, 0.4_7_4_4_1_4_5_6, 0.4_6_0_7_6_0_6, 0.4_8_7_1_9_2_6_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        prompt = '''A red cartoon frog, 4k'''
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' ,torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' ,torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt ,generator=generator ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
        output = pipeline(
            image=init_image ,image_embeds=image_emb ,negative_image_embeds=zero_image_emb ,generator=generator ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,strength=0.2 ,output_type='''np''' ,)
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image ,expected_image )
| 41 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_TRANSFORMERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def get_indent( line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks( code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore( key ):
    """simple docstring"""
    def _inner( x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects( objects , key=None ):
    """simple docstring"""
    def noop( x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
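# Hedged usage sketch (not part of the original script): constants sort first,
# then classes, then functions, each group alphabetically with underscores ignored.
# >>> sort_objects(["ZeeModel", "A_CONSTANT", "load_file", "BertModel"])
# ['A_CONSTANT', 'BertModel', 'ZeeModel', 'load_file']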
def sort_objects_in_import( import_statement ):
    """simple docstring"""
    def _replace( match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports( file , check_only=True ):
    """simple docstring"""
    with open(file , '''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}." )
            with open(file , '''w''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits( check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
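# Hedged usage note (not part of the original script); the file path below is the
# conventional location for this checker, not confirmed by this snippet:
#   python utils/custom_init_isort.py               # rewrite offending __init__.py files
#   python utils/custom_init_isort.py --check_only  # raise instead of rewriting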
| 41 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = LxmertConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    model = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline
SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
SCREAMING_SNAKE_CASE : Optional[int] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
    def test_text_to_video_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (6_4, 6_4, 3)
        expected_slice = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False ,expected_max_diff=1e-2 )
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_consistent( self ):
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_single_identical( self ):
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def test_num_images_per_prompt( self ):
        pass
    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_full_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase__ = logging.get_logger(__name__)
def squared_euclidean_distance( a , b ):
    """simple docstring"""
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize( x , clusters ):
    """simple docstring"""
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
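# Hedged usage sketch (not part of the original file): two RGB pixels quantized
# against a tiny 3-colour palette; each pixel maps to its nearest cluster index.
# >>> pixels = np.array([[250.0, 10.0, 10.0], [5.0, 5.0, 240.0]])
# >>> palette = np.array([[255.0, 0.0, 0.0], [0.0, 255.0, 0.0], [0.0, 0.0, 255.0]])
# >>> color_quantize(pixels.reshape(1, 2, 3), palette)
# array([0, 2])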
class ImageGPTImageProcessor (BaseImageProcessor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = ['pixel_values']
    def __init__( self ,clusters : Optional[Union[List[List[int]], np.ndarray]] = None ,do_resize : bool = True ,size : Dict[str, int] = None ,resample : PILImageResampling = PILImageResampling.BILINEAR ,do_normalize : bool = True ,do_color_quantize : bool = True ,**kwargs ,):
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6}
        size = get_size_dict(size )
        self.clusters = np.array(clusters ) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize( self ,image : np.ndarray ,size : Dict[str, int] ,resample : PILImageResampling = PILImageResampling.BILINEAR ,data_format : Optional[Union[str, ChannelDimension]] = None ,**kwargs ,):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}" )
        return resize(
            image ,size=(size['''height'''], size['''width''']) ,resample=resample ,data_format=data_format ,**kwargs )
    def normalize( self ,image : np.ndarray ,data_format : Optional[Union[str, ChannelDimension]] = None ,):
        image = rescale(image=image ,scale=1 / 1_2_7.5 ,data_format=data_format )
        image = image - 1
        return image
    def preprocess( self ,images : ImageInput ,do_resize : bool = None ,size : Dict[str, int] = None ,resample : PILImageResampling = None ,do_normalize : bool = None ,do_color_quantize : Optional[bool] = None ,clusters : Optional[Union[List[List[int]], np.ndarray]] = None ,return_tensors : Optional[Union[str, TensorType]] = None ,data_format : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST ,**kwargs ,):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_color_quantize and clusters is None:
            raise ValueError('''Clusters must be specified if do_color_quantize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image ,size=size ,resample=resample ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        if do_color_quantize:
            images = [to_channel_dimension_format(image ,ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images )
            images = color_quantize(images ,clusters ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size ,-1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images )
        else:
            images = [to_channel_dimension_format(image ,data_format ) for image in images]
        data = {'''input_ids''': images}
        return BatchFeature(data=data ,tensor_type=return_tensors )
| 41 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def _A ( A__ ):
"""simple docstring"""
__lowercase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''_float_tensor''',
'''decoder.output_projection.weight''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def _A ( A__ ):
"""simple docstring"""
__lowercase , __lowercase = emb.weight.shape
__lowercase = nn.Linear(A__ , A__ , bias=A__ )
__lowercase = emb.weight.data
return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import os
from math import log10
def solution( data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
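# Hedged worked example (not in the original file): the log trick compares b**e
# without forming huge integers, since log10(b**e) = e * log10(b). For instance,
# 11 * log10(2) ~= 3.311 is less than 7 * log10(3) ~= 3.340, so 3**7 = 2187 beats 2**11 = 2048.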
if __name__ == "__main__":
print(solution())
| 41 | 1 |
'''simple docstring'''
def check_bouncy( num ):
    """simple docstring"""
    if not isinstance(num , int ):
        raise ValueError('''check_bouncy() accepts only integer arguments''' )
    str_n = str(num )
    sorted_str_n = ''''''.join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent = 99 ):
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('''solution() only accepts values from 0 to 100''' )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
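# Hedged examples (not in the original file): 538 is bouncy (5 > 3, then 3 < 8),
# while monotone numbers are not.
# >>> check_bouncy(538)
# True
# >>> check_bouncy(1234)   # strictly increasing digits
# False
# >>> check_bouncy(9410)   # decreasing digits
# False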
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'{solution(99)}')
| 41 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig (PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = 'blenderbot-small'
SCREAMING_SNAKE_CASE : int = ['past_key_values']
SCREAMING_SNAKE_CASE : List[str] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self ,vocab_size=5_0_2_6_5 ,max_position_embeddings=5_1_2 ,encoder_layers=8 ,encoder_ffn_dim=2_0_4_8 ,encoder_attention_heads=1_6 ,decoder_layers=8 ,decoder_ffn_dim=2_0_4_8 ,decoder_attention_heads=1_6 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,use_cache=True ,is_encoder_decoder=True ,activation_function="gelu" ,d_model=5_1_2 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=1 ,scale_embedding=False ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,forced_eos_token_id=2 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
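# Hedged usage sketch (not part of the original file): the defaults above match
# blenderbot_small-90M; any field can be overridden at construction time.
# >>> config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2)
# >>> (config.d_model, config.num_hidden_layers)
# (512, 2)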
class BlenderbotSmallOnnxConfig (OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
@property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast ,self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs ,**decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch ,decoder_past_length )] ,dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers ,num_decoder_layers )
            max_num_layers = max(num_encoder_layers ,num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers ,max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowercase , __lowercase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase , __lowercase = self.num_layers
__lowercase , __lowercase = self.num_attention_heads
__lowercase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowercase = common_inputs['''attention_mask'''].dtype
__lowercase = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase__ ,lowercase__ ,dtype=lowercase__ )] ,dim=1 )
__lowercase = [
(torch.zeros(lowercase__ ), torch.zeros(lowercase__ )) for _ in range(lowercase__ )
]
return common_inputs
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = tokenizer.num_special_tokens_to_add(lowercase__ )
__lowercase = compute_effective_axis_dimension(
lowercase__ ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=lowercase__ )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowercase = dict(tokenizer(lowercase__ ,return_tensors=lowercase__ ) )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : PreTrainedTokenizer ,lowercase__ : int = -1 ,lowercase__ : int = -1 ,lowercase__ : bool = False ,lowercase__ : Optional[TensorType] = None ,):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
elif self.task == "causal-lm":
__lowercase = self._generate_dummy_inputs_for_causal_lm(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
else:
__lowercase = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowercase__ ,batch_size=lowercase__ ,seq_length=lowercase__ ,is_pair=lowercase__ ,framework=lowercase__ )
return common_inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
if self.task in ["default", "seq2seq-lm"]:
__lowercase = super()._flatten_past_key_values_(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
else:
__lowercase = super(lowercase__ ,self )._flatten_past_key_values_(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )
| 41 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = FunnelConfig.from_json_file(A__ )
print(F"Building PyTorch model from configuration: {config}" )
__lowercase = FunnelBaseModel(A__ ) if base_model else FunnelModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_funnel(A__ , A__ , A__ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 41 |
'''simple docstring'''
from __future__ import annotations
def _A ( A__ , A__ ):
"""simple docstring"""
if b == 0:
return (1, 0)
((__lowercase) , (__lowercase)) = extended_euclid(A__ , a % b )
__lowercase = a // b
return (y, x - k * y)
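# extended_euclid(a, b) returns Bezout coefficients (x, y) with a*x + b*y == gcd(a, b)
# (assuming the stripped unpacking above originally bound (x, y)).
# Example: extended_euclid(10, 6) == (-1, 2), since 10 * -1 + 6 * 2 == 2 == gcd(10, 6).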
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
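# chinese_remainder_theorem(n1, r1, n2, r2) solves n = r1 (mod n1) and n = r2 (mod n2)
# for coprime moduli by combining the Bezout coefficients from extended_euclid.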
def _A ( A__ , A__ ):
"""simple docstring"""
((__lowercase) , (__lowercase)) = extended_euclid(A__ , A__ )
if b < 0:
__lowercase = (b % n + n) % n
return b
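# invert_modulo(a, n) returns the multiplicative inverse of a modulo n when gcd(a, n) == 1.
# Example: invert_modulo(3, 7) == 5, since 3 * 5 % 7 == 1.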
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase , __lowercase = invert_modulo(A__ , A__ ), invert_modulo(A__ , A__ )
__lowercase = na * na
__lowercase = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 1 |
'''simple docstring'''
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = [1]
for i in range(2 , A__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
__lowercase = []
__lowercase = list(range(A__ ) )
# Find permutation
while factorials:
__lowercase = factorials.pop()
__lowercase , __lowercase = divmod(A__ , A__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
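# The function above computes the k-th (0-indexed) lexicographic permutation of
# range(n) via the factorial number system: each divmod selects the next index.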
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def _A ( ):
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
__lowercase = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def _A ( ):
"""simple docstring"""
assert _test_patching.open is open
__lowercase = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , A__ ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , A__ ):
pass
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , A__ ) is None
with patch_submodule(_test_patching , '''len''' , A__ ):
assert _test_patching.len is mock
assert _test_patching.len is len
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_start_and_stop_mock__'''
__lowercase = patch_submodule(_test_patching , '''open''' , A__ )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def _A ( ):
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
__lowercase = '''__test_patch_submodule_successive_join__'''
__lowercase = '''__test_patch_submodule_successive_dirname__'''
__lowercase = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , A__ ):
with patch_submodule(_test_patching , '''os.path.join''' , A__ ):
with patch_submodule(_test_patching , '''os.path.dirname''' , A__ ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def _A ( ):
"""simple docstring"""
__lowercase = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , A__ ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , A__ ):
pass
| 41 | 1 |
'''simple docstring'''
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ):
__lowercase = 0
__lowercase = 0
__lowercase = {}
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ):
if vertex not in self.adjacency:
__lowercase = {}
self.num_vertices += 1
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ):
self.add_vertex(lowercase__ )
self.add_vertex(lowercase__ )
if head == tail:
return
__lowercase = weight
__lowercase = weight
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_edges()
for edge in edges:
__lowercase , __lowercase , __lowercase = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase__ ) ):
__lowercase = list(edges[i] )
edges.sort(key=lambda lowercase__ : e[2] )
for i in range(len(lowercase__ ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
__lowercase = edges[i][2] + 1
for edge in edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = weight
__lowercase = weight
def __str__( self : Union[str, Any] ):
__lowercase = ''''''
for tail in self.adjacency:
for head in self.adjacency[tail]:
__lowercase = self.adjacency[head][tail]
string += F"{head} -> {tail} == {weight}\n"
return string.rstrip('''\n''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return self.adjacency.keys()
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : str=None ,lowercase__ : Any=None ):
__lowercase = Graph()
if vertices is None:
__lowercase = []
if edges is None:
__lowercase = []
for vertex in vertices:
g.add_vertex(lowercase__ )
for edge in edges:
g.add_edge(*lowercase__ )
return g
class lowercase_ :
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = {}
__lowercase = {}
def __len__( self : Dict ):
return len(self.parent )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ):
if item in self.parent:
return self.find(lowercase__ )
__lowercase = item
__lowercase = 0
return item
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Dict ):
if item not in self.parent:
return self.make_set(lowercase__ )
if item != self.parent[item]:
__lowercase = self.find(self.parent[item] )
return self.parent[item]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Tuple ,lowercase__ : Dict ):
__lowercase = self.find(lowercase__ )
__lowercase = self.find(lowercase__ )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
__lowercase = roota
return roota
if self.rank[roota] < self.rank[roota]:
__lowercase = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
__lowercase = roota
return roota
return None
@staticmethod
def SCREAMING_SNAKE_CASE ( lowercase__ : Optional[int] ):
__lowercase = graph.num_vertices
__lowercase = Graph.UnionFind()
__lowercase = []
while num_components > 1:
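            # One Boruvka round: record the cheapest edge leaving each component,
            # then union the endpoints so the number of components shrinks.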
__lowercase = {}
for vertex in graph.get_vertices():
__lowercase = -1
__lowercase = graph.get_edges()
for edge in edges:
__lowercase , __lowercase , __lowercase = edge
edges.remove((tail, head, weight) )
for edge in edges:
__lowercase , __lowercase , __lowercase = edge
__lowercase = union_find.find(lowercase__ )
__lowercase = union_find.find(lowercase__ )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowercase = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
__lowercase = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
__lowercase , __lowercase , __lowercase = cheap_edge[vertex]
if union_find.find(lowercase__ ) != union_find.find(lowercase__ ):
union_find.union(lowercase__ ,lowercase__ )
mst_edges.append(cheap_edge[vertex] )
__lowercase = num_components - 1
__lowercase = Graph.build(edges=lowercase__ )
return mst
| 41 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Tuple = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : List[str] = True
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : Any=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = NezhaModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = NezhaModel.from_pretrained(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
| 41 | 1 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _A ( A__ ):
"""simple docstring"""
for param in module.parameters():
__lowercase = False
def _A ( ):
"""simple docstring"""
__lowercase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__lowercase = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def _A ( A__ ):
"""simple docstring"""
__lowercase = plt.imshow(A__ )
fig.axes.get_xaxis().set_visible(A__ )
fig.axes.get_yaxis().set_visible(A__ )
plt.show()
def _A ( ):
"""simple docstring"""
__lowercase = datetime.now()
__lowercase = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 41 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
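    # Open addressing with linear probing: on a collision, _get_next_ind walks to the
    # next bucket; deletions leave the _deleted sentinel so probe chains stay intact.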
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
| 41 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'''vocab_file''': '''spiece.model'''}
lowerCAmelCase__ = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowerCAmelCase__ = {
'''google/bigbird-roberta-base''': 4096,
'''google/bigbird-roberta-large''': 4096,
'''google/bigbird-base-trivia-itc''': 4096,
}
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : str ,lowercase__ : int ,lowercase__ : Optional[Any]="<unk>" ,lowercase__ : str="<s>" ,lowercase__ : List[Any]="</s>" ,lowercase__ : Dict="<pad>" ,lowercase__ : int="[SEP]" ,lowercase__ : str="[MASK]" ,lowercase__ : Tuple="[CLS]" ,lowercase__ : Optional[Dict[str, Any]] = None ,**lowercase__ : str ,):
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else bos_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else eos_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else unk_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else pad_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else cls_token
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
__lowercase = AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ ) if isinstance(lowercase__ ,lowercase__ ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase__ ,eos_token=lowercase__ ,unk_token=lowercase__ ,pad_token=lowercase__ ,sep_token=lowercase__ ,mask_token=lowercase__ ,cls_token=lowercase__ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase__ ,)
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase__ )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ):
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
__lowercase = self.__dict__.copy()
__lowercase = None
return state
def __setstate__( self : str ,lowercase__ : Optional[int] ):
__lowercase = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
__lowercase = {}
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : str ):
return self.sp_model.encode(lowercase__ ,out_type=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Dict ):
return self.sp_model.piece_to_id(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ):
__lowercase = self.sp_model.IdToPiece(lowercase__ )
return token
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ):
__lowercase = []
__lowercase = ''''''
__lowercase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
__lowercase = True
__lowercase = []
else:
current_sub_tokens.append(lowercase__ )
__lowercase = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[int] ,lowercase__ : bool = False ,lowercase__ : bool = None ,lowercase__ : bool = True ,**lowercase__ : Optional[Any] ,):
__lowercase = kwargs.pop('''use_source_tokenizer''' ,lowercase__ )
__lowercase = self.convert_ids_to_tokens(lowercase__ ,skip_special_tokens=lowercase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowercase = []
__lowercase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase__ ) )
__lowercase = []
sub_texts.append(lowercase__ )
else:
current_sub_text.append(lowercase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowercase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__lowercase = re.sub(r''' (\[(MASK|SEP)\])''' ,r'''\1''' ,''' '''.join(lowercase__ ) )
else:
__lowercase = ''''''.join(lowercase__ )
__lowercase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowercase = self.clean_up_tokenization(lowercase__ )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : str ,lowercase__ : Optional[str] = None ):
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__lowercase = os.path.join(
lowercase__ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ ,'''wb''' ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ,lowercase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ ,token_ids_a=lowercase__ ,already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1] + ([0] * len(lowercase__ )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[int] ,lowercase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 41 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
self.check_model_type(lowercase__ )
def __call__( self : List[str] ,lowercase__ : Union[str, "Image.Image", List[Dict[str, Any]]] ,lowercase__ : Union[str, List[str]] = None ,**lowercase__ : str ,):
if "text_queries" in kwargs:
__lowercase = kwargs.pop('''text_queries''' )
if isinstance(lowercase__ ,(str, Image.Image) ):
__lowercase = {'''image''': image, '''candidate_labels''': candidate_labels}
else:
__lowercase = image
__lowercase = super().__call__(lowercase__ ,**lowercase__ )
return results
def SCREAMING_SNAKE_CASE ( self : int ,**lowercase__ : List[Any] ):
__lowercase = {}
if "threshold" in kwargs:
__lowercase = kwargs['''threshold''']
if "top_k" in kwargs:
__lowercase = kwargs['''top_k''']
return {}, {}, postprocess_params
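    # ChunkPipeline flow: preprocess yields one set of model inputs per candidate label,
    # so the model runs a separate forward pass for every (image, label) pair.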
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ):
__lowercase = load_image(inputs['''image'''] )
__lowercase = inputs['''candidate_labels''']
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = candidate_labels.split(''',''' )
__lowercase = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
for i, candidate_label in enumerate(lowercase__ ):
__lowercase = self.tokenizer(lowercase__ ,return_tensors=self.framework )
__lowercase = self.image_processor(lowercase__ ,return_tensors=self.framework )
yield {
"is_last": i == len(lowercase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ):
__lowercase = model_inputs.pop('''target_size''' )
__lowercase = model_inputs.pop('''candidate_label''' )
__lowercase = model_inputs.pop('''is_last''' )
__lowercase = self.model(**lowercase__ )
__lowercase = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : List[Any]=0.1 ,lowercase__ : List[str]=None ):
__lowercase = []
for model_output in model_outputs:
__lowercase = model_output['''candidate_label''']
__lowercase = BaseModelOutput(lowercase__ )
__lowercase = self.image_processor.post_process_object_detection(
outputs=lowercase__ ,threshold=lowercase__ ,target_sizes=model_output['''target_size'''] )[0]
for index in outputs["scores"].nonzero():
__lowercase = outputs['''scores'''][index].item()
__lowercase = self._get_bounding_box(outputs['''boxes'''][index][0] )
__lowercase = {'''score''': score, '''label''': label, '''box''': box}
results.append(lowercase__ )
__lowercase = sorted(lowercase__ ,key=lambda lowercase__ : x["score"] ,reverse=lowercase__ )
if top_k:
__lowercase = results[:top_k]
return results
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : "torch.Tensor" ):
if self.framework != "pt":
raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
__lowercase , __lowercase , __lowercase , __lowercase = box.int().tolist()
__lowercase = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
| 41 | 1 |
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = XGLMTokenizer
SCREAMING_SNAKE_CASE : int = XGLMTokenizerFast
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : Tuple = True
def SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = XGLMTokenizer(lowercase__ ,keep_accents=lowercase__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = '''<pad>'''
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase__ ) ,lowercase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase__ ) ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'''<s>''' )
self.assertEqual(vocab_keys[1] ,'''<pad>''' )
self.assertEqual(len(lowercase__ ) ,1_0_0_8 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_8 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = XGLMTokenizer(lowercase__ ,keep_accents=lowercase__ )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase__ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
__lowercase = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(
lowercase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
__lowercase = tokenizer.convert_ids_to_tokens(lowercase__ )
self.assertListEqual(
lowercase__ ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
@cached_property
def SCREAMING_SNAKE_CASE ( self : int ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase__ ,f.name )
__lowercase = XGLMTokenizer(f.name ,keep_accents=lowercase__ )
__lowercase = pickle.dumps(lowercase__ )
pickle.loads(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
if not self.test_rust_tokenizer:
return
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = '''I was born in 92000, and this is falsé.'''
__lowercase = tokenizer.tokenize(lowercase__ )
__lowercase = rust_tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
__lowercase = self.get_rust_tokenizer()
__lowercase = tokenizer.encode(lowercase__ )
__lowercase = rust_tokenizer.encode(lowercase__ )
self.assertListEqual(lowercase__ ,lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = '''Hello World!'''
__lowercase = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowercase__ ,self.big_tokenizer.encode(lowercase__ ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__lowercase = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowercase__ ,self.big_tokenizer.encode(lowercase__ ) )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
# fmt: off
__lowercase = {
'''input_ids''': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase__ ,model_name='''facebook/xglm-564M''' ,padding=lowercase__ ,)
| 41 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['text', ['text']]
    outputs = ['text']

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail'):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors='pt',
            padding='max_length',
        )

    def decode(self, outputs):
        logits = outputs.logits
        # Use the entailment column computed in setup() rather than a hardcoded index.
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
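

# --- Illustrative usage sketch (not part of the original file) ---
# The same zero-shot trick the tool implements, written out by hand: score the
# NLI entailment of "This example is <label>" against the text and keep the
# best label. Assumes an NLI checkpoint whose config labels contain an
# "entailment" entry (true for facebook/bart-large-mnli) and that the
# checkpoint can be downloaded.
def classify_zero_shot(text, labels, checkpoint='facebook/bart-large-mnli'):
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
    entailment_id = next(i for i, label in model.config.id2label.items() if label.lower().startswith('entail'))
    inputs = tokenizer(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors='pt',
        padding=True,
    )
    with torch.no_grad():
        logits = model(**inputs).logits
    return labels[logits[:, entailment_id].argmax().item()]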
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs the changes required for swapping two elements in the heap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the scores of two heap slots."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """Returns the index that should be on top among index i and its children."""
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction from the given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction from the given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates the given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes the given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts the given item with the given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Returns the top [item, score] pair from the heap and removes it as well."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
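

# --- Illustrative usage sketch (not part of the original file) ---
# The class above keeps the item with the *highest* key score on top (see
# _cmp and _get_valid_parent), and the pos_map index lets update_item and
# delete_item run in O(log n). A small smoke test (runs on import):
_heap = lowercase_()
_heap.insert_item(5, 34)
_heap.insert_item(6, 31)
_heap.insert_item(7, 37)
assert _heap.get_top() == [7, 37]
_heap.update_item(7, 30)  # re-score an existing item in place
assert _heap.get_top() == [5, 34]
_heap.delete_item(5)
assert _heap.get_top() == [6, 31]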
| 41 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """simple docstring"""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
"""simple docstring"""
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)
        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))
        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if we use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
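

# --- Illustrative sketch (not part of the original file) ---
# A practical callback in the same spirit as MyTestTrainerCallback above:
# instead of recording event names, it flips TrainerControl flags.
# transformers already ships EarlyStoppingCallback; this hand-rolled version
# only illustrates the on_evaluate hook and the control object.
class EarlyStopOnPlateau(TrainerCallback):
    def __init__(self, metric="eval_loss", patience=3):
        self.metric = metric
        self.patience = patience
        self.best = None
        self.bad_evals = 0

    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        value = (metrics or {}).get(self.metric)
        if value is None:
            return
        if self.best is None or value < self.best:
            self.best, self.bad_evals = value, 0
        else:
            self.bad_evals += 1
            if self.bad_evals >= self.patience:
                control.should_training_stop = True  # honored by the Trainer loop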
| 41 | 1 |
'''simple docstring'''
import os
from math import log10


def solution(A__="base_exp.txt"):
    """simple docstring"""
    largest = 0
    result = 0
    # Resolve the data file relative to this script, not the working directory.
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), A__))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
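

# --- Worked example (not part of the original file) ---
# Why comparing x * log10(a) works: log10 is strictly increasing, so
# a**x > c**y exactly when x * log10(a) > y * log10(c). Spot check with
# numbers small enough to compute directly: 2**12 = 4096 vs 3**7 = 2187.
assert (12 * log10(2) > 7 * log10(3)) == (2**12 > 3**7)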
| 41 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. contronet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
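

# --- Illustrative sketch (not part of the original file) ---
# FlaxTimesteps above projects a scalar diffusion timestep to a sinusoidal
# feature vector before the MLP embedding. A minimal, self-contained version
# of the standard transformer-style embedding (the flip_sin_to_cos and
# freq_shift options of the real layer are omitted here) looks like:
def _sinusoidal_embedding(timesteps, dim, max_period=10_000):
    half = dim // 2
    freqs = jnp.exp(-jnp.log(float(max_period)) * jnp.arange(half) / half)
    args = timesteps[:, None].astype(jnp.float32) * freqs[None, :]
    return jnp.concatenate([jnp.sin(args), jnp.cos(args)], axis=-1)


# Shape check: three timesteps, 32-dim embedding.
assert _sinusoidal_embedding(jnp.array([0, 10, 999]), 32).shape == (3, 32)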
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """simple docstring"""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
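

# --- Worked cross-check (not part of the original file) ---
# For degree 2 the Bernstein sum above reduces to the classic closed form
# B(t) = (1-t)^2 * P0 + 2t(1-t) * P1 + t^2 * P2; a quick numeric spot check:
def _quadratic_closed_form(p0, p1, p2, t):
    return (
        (1 - t) ** 2 * p0[0] + 2 * t * (1 - t) * p1[0] + t**2 * p2[0],
        (1 - t) ** 2 * p0[1] + 2 * t * (1 - t) * p1[1] + t**2 * p2[1],
    )


assert BezierCurve([(0, 0), (5, 5), (5, 0)]).bezier_curve_function(0.5) == (3.75, 2.5)
assert _quadratic_closed_form((0, 0), (5, 5), (5, 0), 0.5) == (3.75, 2.5)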
| 41 |
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _check_torch_version():
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """simple docstring"""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
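

# --- Illustrative shape check (not part of the original file) ---
# torch.nn.functional.unfold with stride == kernel size cuts an image into
# non-overlapping patches, which is what the helper above relies on: a
# (1, C, H, W) image yields (1, C*ph*pw, (H//ph)*(W//pw)) columns, one per
# patch. Kept inside a function so it does not run torch code at import time.
def _unfold_shape_demo():
    image = torch.arange(3 * 4 * 6, dtype=torch.float32).reshape(1, 3, 4, 6)
    columns = torch.nn.functional.unfold(image, (2, 3), stride=(2, 3))
    assert columns.shape == (1, 3 * 2 * 3, (4 // 2) * (6 // 3))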
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
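

# --- Illustrative check (not part of the original file) ---
# The normalize method above performs per-image standardization,
# (x - mean(x)) / max(std(x), 1 / sqrt(x.size)); a tiny numpy version:
def _standardize_demo():
    data = np.arange(12, dtype=np.float32).reshape(3, 4)
    adjusted_std = max(float(np.std(data)), 1.0 / math.sqrt(data.size))
    normalized = (data - np.mean(data)) / adjusted_std
    assert abs(float(np.mean(normalized))) < 1e-6
    assert abs(float(np.std(normalized)) - 1.0) < 1e-4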
| 41 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowercase__ : int ,lowercase__ : Any=7 ,lowercase__ : Optional[Any]=3 ,lowercase__ : Any=1_8 ,lowercase__ : Optional[Any]=3_0 ,lowercase__ : Any=4_0_0 ,lowercase__ : str=True ,lowercase__ : Union[str, Any]=3_2 ,lowercase__ : Tuple=True ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = num_channels
__lowercase = image_size
__lowercase = min_resolution
__lowercase = max_resolution
__lowercase = do_resize
__lowercase = size_divisor
__lowercase = do_rescale
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class lowercase_ (lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase__ ,'''do_resize''' ) )
self.assertTrue(hasattr(lowercase__ ,'''size_divisor''' ) )
self.assertTrue(hasattr(lowercase__ ,'''resample''' ) )
self.assertTrue(hasattr(lowercase__ ,'''do_rescale''' ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,numpify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE ( self : Dict ):
# Initialize image_processing
__lowercase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase = prepare_image_inputs(self.image_processor_tester ,equal_resolution=lowercase__ ,torchify=lowercase__ )
for image in image_inputs:
self.assertIsInstance(lowercase__ ,torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__lowercase = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
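

# --- Worked example (not part of the original file) ---
# The assertions above only check divisibility; the processor achieves it by
# flooring each spatial dimension to the nearest multiple of size_divisor
# (32 by default), i.e. plain floor division:
def _round_down_to_multiple(x, divisor=32):
    return (x // divisor) * divisor


assert _round_down_to_multiple(400) == 384 and 384 % 32 == 0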
| 41 |
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class lowercase_ :
"""simple docstring"""
    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def SCREAMING_SNAKE_CASE(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
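

# --- Numerical cross-check (not part of the original file) ---
# By the convolution theorem, circular convolution equals
# ifft(fft(a) * fft(b)); for the signals used above, the matrix method's
# expected output is [10.0, 10.0, 6.0, 14.0].
def _fft_circular_convolution_check():
    a = np.array([2, 1, 2, -1], dtype=float)
    b = np.array([1, 2, 3, 4], dtype=float)
    via_fft = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
    assert np.allclose(via_fft, [10.0, 10.0, 6.0, 14.0])


_fft_circular_convolution_check()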
| 41 | 1 |
'''simple docstring'''
from __future__ import annotations
def _A(value, weight, capacity):
    """simple docstring"""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
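

# --- Worked example (not part of the original file) ---
# Classic instance: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# Greedy by value/weight ratio takes items 1 and 2 whole plus 2/3 of item 3,
# for a total value of 240.
_max_value, _fractions = _A([60, 100, 120], [10, 20, 30], 50)
assert _max_value == 240
assert _fractions == [1, 1, 2 / 3]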
| 41 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Tuple ,lowercase__ : Optional[Any]=1_3 ,lowercase__ : Optional[Any]=7 ,lowercase__ : Tuple=True ,lowercase__ : Optional[Any]=True ,lowercase__ : Tuple=True ,lowercase__ : int=True ,lowercase__ : Any=9_9 ,lowercase__ : Optional[Any]=6_4 ,lowercase__ : Dict=3_2 ,lowercase__ : Tuple=5 ,lowercase__ : Optional[Any]=4 ,lowercase__ : Union[str, Any]=3_7 ,lowercase__ : List[str]="gelu" ,lowercase__ : Optional[Any]=0.1 ,lowercase__ : str=0.1 ,lowercase__ : Optional[Any]=5_1_2 ,lowercase__ : List[str]=1_6 ,lowercase__ : Optional[int]=2 ,lowercase__ : int=0.0_2 ,lowercase__ : Any=3 ,lowercase__ : Optional[Any]=4 ,lowercase__ : List[Any]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = embedding_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : Any ,lowercase__ : Union[str, Any] ):
__lowercase = MegatronBertModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : int ):
__lowercase = MegatronBertForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = MegatronBertForCausalLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Any ,lowercase__ : Any ):
__lowercase = MegatronBertForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : List[Any] ,lowercase__ : Any ):
__lowercase = MegatronBertForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Dict ,lowercase__ : Any ):
__lowercase = MegatronBertForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ):
__lowercase = self.num_labels
__lowercase = MegatronBertForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Dict ):
__lowercase = self.num_labels
__lowercase = MegatronBertForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : Dict ):
__lowercase = self.num_choices
__lowercase = MegatronBertForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Optional[int] = True
# test_resize_embeddings = False
SCREAMING_SNAKE_CASE : str = False
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Union[str, Any] ,lowercase__ : str ,lowercase__ : Dict=False ):
__lowercase = super()._prepare_for_class(lowercase__ ,lowercase__ ,return_labels=lowercase__ )
if return_labels:
if model_class in get_values(lowercase__ ):
__lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowercase__ )
__lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self : Tuple ):
__lowercase = MegatronBertModelTester(self )
__lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : str ):
self.config_tester.run_common_tests()
    def test_megatron_bert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ):
    """simple docstring"""
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase ):
    """simple docstring"""
@slow
@unittest.skip('''Model is not available.''' )
    def test_inference_no_head( self ):
        directory = '''nvidia/megatron-bert-uncased-345m'''
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ['''MYDIR'''] ,directory )
        model = MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        input_ids = _long_tensor([[1_0_1, 7_1_1_0, 1_0_0_5, 1_0_5_6, 2_0_2_3, 1_1_3_3_3, 1_7_4_1_3, 1_0_2_9, 1_0_2]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_0_2_4) )
        self.assertEqual(output.shape ,expected_shape )
        expected = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = '''ii={} jj={} a={} b={}'''.format(ii ,jj ,a ,b )
                self.assertTrue(math.isclose(a ,b ,rel_tol=TOLERANCE ,abs_tol=TOLERANCE ) ,msg=msg )
| 41 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations(examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task='''summarization''' , prefix=None , **generate_kwargs , ):
    """simple docstring"""
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    """simple docstring"""
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F"parsed the following generate kwargs: {parsed_args}" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
if args.reference_path is None:
return {}
# Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 1 |
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa ):
    """simple docstring"""
    config = BigBirdConfig.from_json_file(big_bird_config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config )
    else:
        model = BigBirdForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 0 |
'''simple docstring'''
from __future__ import annotations
def print_distance(distance , src ):
    """simple docstring"""
    print(F"Vertex\tShortest Distance from vertex {src}" )
    for i, d in enumerate(distance ):
        print(F"{i}\t\t{d}" )
def check_negative_cycle(graph , distance , edge_count ):
    """simple docstring"""
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph , vertex_count , edge_count , src ):
    """simple docstring"""
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src , dest , weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
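# A minimal non-interactive usage sketch (the edge list below is an
# illustrative assumption, not part of the original script):
#
# >>> demo_edges = [
# ...     {"src": 0, "dst": 1, "weight": 4.0},
# ...     {"src": 0, "dst": 2, "weight": 1.0},
# ...     {"src": 2, "dst": 1, "weight": 2.0},
# ... ]
# >>> bellman_ford(demo_edges, 3, 3, 0)
# [0.0, 3.0, 1.0]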
| 41 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix , vector: Matrix ) -> Matrix:
    """simple docstring"""
    size = len(matrix )
    # build the augmented matrix [matrix | vector]
    augmented = [[0 for _ in range(size + 1 )] for _ in range(size )]
    for row in range(size ):
        for col in range(size ):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]
    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col] ), rowa) for rowa in range(row , size ) )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]
        for rowa in range(row + 1 , size ):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1 , size + 1 ):
                augmented[rowa][cola] -= augmented[row][cola] * ratio
        row += 1
        col += 1
    # back substitution
    for col in range(1 , size ):
        for row in range(col ):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col , size + 1 ):
                augmented[row][cola] -= augmented[col][cola] * ratio
    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(size )
    ]
def interpolate(y_points: list[int] ) -> Callable[[int], int]:
    """simple docstring"""
    size = len(y_points )
    matrix = [[0 for _ in range(size )] for _ in range(size )]
    vector = [[0] for _ in range(size )]
    for x_val, y_val in enumerate(y_points ):
        for col in range(size ):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val
    coeffs = solve(matrix , vector )
    def interpolated_func(var: int ) -> int:
        return sum(
            round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
            for x_val in range(size ) )
    return interpolated_func
def question_function(variable: int ) -> int:
    """simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function , order: int = 10 ) -> int:
    """simple docstring"""
    data_points = [func(x_val ) for x_val in range(1 , order + 1 )]
    polynomials = [
        interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
    ]
    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val ) == poly(x_val ):
            x_val += 1
        ret += poly(x_val )
    return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor ):
    """simple docstring"""
    def __init__( self ,*args ,**kwargs ):
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 41 | 0 |
def valid_coloring(neighbours: list[int] , colored_vertices: list[int] , color: int ) -> bool:
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )
def util_color(graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) -> bool:
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]] , max_colors: int ) -> list[int]:
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
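# A minimal usage sketch (the adjacency matrix below is an illustrative
# assumption, not part of the original module):
#
# >>> demo_graph = [
# ...     [0, 1, 0, 0, 0],
# ...     [1, 0, 1, 0, 1],
# ...     [0, 1, 0, 1, 0],
# ...     [0, 0, 1, 0, 0],
# ...     [0, 1, 0, 0, 0],
# ... ]
# >>> color(demo_graph, 3)
# [0, 1, 0, 1, 0]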
| 2 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.0_1
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
            assert time.time() - _start > timeout
def test_long_path(tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1000 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
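# A short sketch of the FileLock API the tests above exercise (illustrative,
# not part of the original test module):
#
# >>> lock = FileLock("demo.lock")
# >>> with lock.acquire(timeout=1):
# ...     pass  # exclusive section; a competing acquire() would time out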
| 41 | 0 |
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__( self ):
        '''simple docstring'''
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch( self , input_image ):
        '''simple docstring'''
        self.img = cv2.imread(input_image , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='x' )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last )
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
            self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg' , self.img )
    def plot_histogram( self ):
        '''simple docstring'''
        plt.hist(self.img.ravel() , 256 , [0, 256] )
    def show_image( self ):
        '''simple docstring'''
        cv2.imshow('Output-Image' , self.img )
        cv2.imshow('Input-Image' , self.original_image )
        cv2.waitKey(5000 )
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
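# Added explanatory note: with prk the normalized count of gray level k,
# self.sk accumulates the empirical CDF, and each level is remapped to
# roughly round((L - 1) * CDF(k)), i.e. standard histogram equalization.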
| 3 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 41 | 0 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir ):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(F'can\'t find {path}' )
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus ):
    def test_run_glue( self ):
        """simple docstring"""
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir )
            self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start , 5_00 )
    def test_trainer_tpu( self ):
        """simple docstring"""
        import xla_spawn
        testargs = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
        with patch.object(sys , 'argv' , testargs ):
            xla_spawn.main()
| 4 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def get_indent(line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code , indent_level="" , start_prompt=None , end_prompt=None ):
    """simple docstring"""
    index = 0
    lines = code.split('''\n''' )
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt ):
            index += 1
        blocks = ['''\n'''.join(lines[:index] )]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines ) and (end_prompt is None or not lines[index].startswith(end_prompt )):
        if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
            if len(current_block ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
                current_block.append(lines[index] )
                blocks.append('''\n'''.join(current_block ) )
                if index < len(lines ) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('''\n'''.join(current_block ) )
                current_block = [lines[index]]
        else:
            current_block.append(lines[index] )
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block ) > 0:
        blocks.append('''\n'''.join(current_block ) )
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines ):
        blocks.append('''\n'''.join(lines[index:] ) )
    return blocks
def ignore_underscore(key ):
    """simple docstring"""
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects(objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    key1 = ignore_underscore(key )
    return sorted(constants , key=key1 ) + sorted(classes , key=key1 ) + sorted(functions , key=key1 )
def sort_objects_in_import(import_statement ):
    """simple docstring"""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports(file , check_only=True ):
    """simple docstring"""
    with open(file , '''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}." )
            with open(file , '''w''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits(check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
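# Illustrative behaviour of the sorting helper above (assumed example, not
# part of the original script): constants come first, then classes, then
# functions, each group sorted case-insensitively with underscores ignored.
#
# >>> sort_objects(["foo", "Bar", "CONST"])
# ['CONST', 'Bar', 'foo']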
| 41 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    '''simple docstring'''
    def __init__( self , text , pattern ):
        """simple docstring"""
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char ):
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos ):
        """simple docstring"""
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ):
        """simple docstring"""
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
_lowercase = """ABAABA"""
_lowercase = """AB"""
_lowercase = BoyerMooreSearch(text, pattern)
_lowercase = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
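# Added note, derived from the code above: for text "ABAABA" and pattern
# "AB" the heuristic reports occurrences at indices 0 and 3.
#
# >>> BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic()
# [0, 3]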
| 5 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self ,device ,seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (6_4, 6_4, 3)
        expected_slice = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False ,expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False ,expected_max_diff=1e-2 )
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_consistent( self ):
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_single_identical( self ):
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def test_num_images_per_prompt( self ):
        pass
    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase ):
    """simple docstring"""
    def test_full_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
| 41 | 0 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property ):
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("""unreadable attribute""" )
        attr = """__cached_""" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool(val ):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f'''invalid truth value {val!r}''' )
def is_tensor(x ):
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch
        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )
def _is_numpy(x ):
    return isinstance(x , np.ndarray )
def is_numpy_array(x ):
    return _is_numpy(x )
def _is_torch(x ):
    import torch
    return isinstance(x , torch.Tensor )
def is_torch_tensor(x ):
    return False if not is_torch_available() else _is_torch(x )
def _is_torch_device(x ):
    import torch
    return isinstance(x , torch.device )
def is_torch_device(x ):
    return False if not is_torch_available() else _is_torch_device(x )
def _is_torch_dtype(x ):
    import torch
    if isinstance(x , str ):
        if hasattr(torch , x ):
            x = getattr(torch , x )
        else:
            return False
    return isinstance(x , torch.dtype )
def is_torch_dtype(x ):
    return False if not is_torch_available() else _is_torch_dtype(x )
def _is_tensorflow(x ):
    import tensorflow as tf
    return isinstance(x , tf.Tensor )
def is_tf_tensor(x ):
    return False if not is_tf_available() else _is_tensorflow(x )
def _is_tf_symbolic_tensor(x ):
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , """is_symbolic_tensor""" ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor
def is_tf_symbolic_tensor(x ):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )
def _is_jax(x ):
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x , jnp.ndarray )
def is_jax_tensor(x ):
    return False if not is_flax_available() else _is_jax(x )
def to_py_obj(obj ):
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj ):
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
class ModelOutput(OrderedDict ):
    def __post_init__( self ):
        """simple docstring"""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f'''{self.__class__.__name__} has no fields.''' )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''' )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
    def setdefault( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
    def pop( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
    def update( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
    def __getitem__( self , k ):
        """simple docstring"""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        """simple docstring"""
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        """simple docstring"""
        return tuple(self[k] for k in self.keys() )
class ExplicitEnum(str , Enum ):
    @classmethod
    def _missing_( cls , value ):
        """simple docstring"""
        raise ValueError(
            f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}''' )
class PaddingStrategy(ExplicitEnum ):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"
class TensorType(ExplicitEnum ):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    def __init__( self , context_managers: List[ContextManager] ):
        """simple docstring"""
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        """simple docstring"""
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        """simple docstring"""
        self.stack.__exit__(*args , **kwargs )
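# A short usage sketch (illustrative, not part of the original module):
# entering one ContextManagers object enters every wrapped context manager
# via the internal ExitStack, and exiting unwinds them in reverse order.
#
# >>> with ContextManagers([open("a.txt", "w"), open("b.txt", "w")]):
# ...     pass  # both files are open inside this block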
def can_return_loss(model_class ):
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class ):
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping , parent_key: str = "" , delimiter: str = "." ):
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
@contextmanager
def working_or_temp_dir(working_dir , use_temp_dir: bool = False ):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array , axes=None ):
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(f'''Type not supported for transpose: {type(array )}.''' )
def reshape(array , newshape ):
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(f'''Type not supported for reshape: {type(array )}.''' )
def squeeze(array , axis=None ):
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for squeeze: {type(array )}.''' )
def expand_dims(array , axis ):
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(f'''Type not supported for expand_dims: {type(array )}.''' )
def tensor_size(array ):
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(f'''Type not supported for tensor_size: {type(array )}.''' )
def add_model_info_to_auto_map(auto_map , repo_id ):
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [f'''{repo_id}--{v}''' if (v is not None and """--""" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f'''{repo_id}--{value}'''
    return auto_map
def infer_framework(model_class ):
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("""torch""" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f'''Could not infer framework from class {model_class}.''' )
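# A quick illustrative check of the framework-agnostic helpers above, using
# NumPy only (assumed example, not part of the original module):
#
# >>> import numpy as _np
# >>> transpose(_np.ones((2, 3))).shape
# (3, 2)
# >>> tensor_size(_np.ones((2, 3)))
# 6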
| 6 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path , hf_config_path='''facebook/mbart-large-en-ro''' , finetuned=False , mbart_50=False ):
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is a mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
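# Example invocation (the script filename and checkpoint paths below are
# illustrative assumptions, not taken from the original file):
# python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-converted \
#     --hf_config facebook/mbart-large-cc25 --finetuned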
| 41 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a = logging.get_logger(__name__)
a = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''mctct'''
    def __init__( self , vocab_size=8_065 , hidden_size=1_536 , num_hidden_layers=36 , intermediate_size=6_144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1E-5 , layerdrop=0.3 , hidden_act='''relu''' , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction='''sum''' , ctc_zero_infinity=False , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
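# A minimal usage sketch (illustrative): the validation above fires whenever
# the kernel tuple length disagrees with `num_conv_layers`.
#
# >>> config = MCTCTConfig(num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,))
# >>> len(config.conv_kernel) == config.num_conv_layers
# True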
| 7 |
'''simple docstring'''
import os
from math import log10
def solution(data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        base , exponent = list(map(int , line.split(''',''' ) ) )
        if exponent * log10(base ) > largest:
            largest = exponent * log10(base )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
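# Why the logarithm works (added explanation): base**exp is astronomically
# large, but log10(base**exp) = exp * log10(base), so comparing the products
# exp * log10(base) ranks the lines without big-integer arithmetic.
#
# >>> from math import log10
# >>> 3 * log10(100) > 2 * log10(1000)   # 100**3 vs 1000**2, both equal 10**6
# False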
| 41 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, 'config.json')) and os.path.isfile(
            os.path.join(dirpath, 'config.json')
        ):
            os.remove(os.path.join(dirpath, 'config.json'))
        if os.path.exists(os.path.join(dirpath, 'pytorch_model.bin')) and os.path.isfile(
            os.path.join(dirpath, 'pytorch_model.bin')
        ):
            os.remove(os.path.join(dirpath, 'pytorch_model.bin'))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
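# Hedged sanity check (not part of the original script): a uniform attention row has
# maximal entropy ln(n), e.g. entropy(torch.full((4,), 0.25)) is about 1.386 = ln(4).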
def print_2d_tensor(tensor):
    logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)
    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='Iteration', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info('Attention entropies')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('Head importance scores')
        print_2d_tensor(head_importance)
    logger.info('Head ranked by importance scores')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info('Pruning: original score: %f, threshold: %f', original_score, original_score * args.masking_threshold)
    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf')
        current_heads_to_mask = head_importance.view(-1).sort()[1]
        if len(current_heads_to_mask) <= num_to_mask:
            print('BREAK BY num_to_mask')
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('Heads to mask: %s', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)
        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percents)',
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 1_00,
        )
    logger.info('Final head mask')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, 'head_mask.npy'), head_mask.detach().cpu().numpy())
    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    new_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - new_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)',
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 1_00,
    )
    logger.info('Pruning: score with masking: %f score with pruning: %f', score_masking, score_pruning)
    logger.info('Pruning: speed ratio (original timing / new timing): %f percents', original_time / new_time * 1_00)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--data_dir', default=None, type=str, required=True, help='The input data dir. Should contain the .tsv files (or other data files) for the task.', )
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained model or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.', )
    # Other parameters
    parser.add_argument(
        '--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--tokenizer_name', default='', type=str, help='Pretrained tokenizer name or path if not the same as model_name_or_path', )
    parser.add_argument(
        '--cache_dir', default=None, type=str, help='Where do you want to store the pre-trained models downloaded from s3', )
    parser.add_argument(
        '--data_subset', type=int, default=-1, help='If > 0: limit the data to a subset of data_subset instances.')
    parser.add_argument(
        '--overwrite_output_dir', action='store_true', help='Whether to overwrite data in output directory')
    parser.add_argument(
        '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument(
        '--dont_normalize_importance_by_layer', action='store_true', help='Don\'t normalize importance score by layers')
    parser.add_argument(
        '--dont_normalize_global_importance', action='store_true', help='Don\'t normalize all importance scores between 0 and 1', )
    parser.add_argument(
        '--try_masking', action='store_true', help='Whether to try to mask head until a threshold of accuracy.')
    parser.add_argument(
        '--masking_threshold', default=0.9, type=float, help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).', )
    parser.add_argument(
        '--masking_amount', default=0.1, type=float, help='Amount of heads to mask at each masking step.')
    parser.add_argument('--metric_name', default='acc', type=str, help='Metric to use for head masking.')
    parser.add_argument(
        '--max_seq_length', default=1_28, type=int, help=(
            'The maximum total input sequence length after WordPiece tokenization. \n'
            'Sequences longer than this will be truncated, sequences shorter padded.'
        ), )
    parser.add_argument('--batch_size', default=1, type=int, help='Batch size.')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--local_rank', type=int, default=-1, help='local_rank for distributed training on gpus')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('cuda', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, 'run_args.bin'))
    logger.info('Training/evaluation parameters %s', args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ])
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
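# A hypothetical invocation (script name, paths and flags are placeholders, not from the source):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir tokens.txt \
#       --output_dir ./pruned --try_masking --masking_threshold 0.9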
| 8 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=5_0_2_6_5, max_position_embeddings=5_1_2, encoder_layers=8, encoder_ffn_dim=2_0_4_8, encoder_attention_heads=1_6, decoder_layers=8, decoder_ffn_dim=2_0_4_8, decoder_attention_heads=1_6, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=5_1_2, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,)
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    def inputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])
        return common_inputs
    @property
    def outputs(self):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
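# Hedged shape note (derived from the code above, not from external docs): each
# past_key_values entry is a tensor of shape
# (batch, num_heads, past_seq_len, hidden_size // num_heads), which is exactly the
# shape the dummy generators zero-fill when `use_past` is enabled.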
| 41 | 0 |
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
    ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'''
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        'q': query,
        'tbm': 'isch',
        'hl': 'en',
        'ijn': '0',
    }
    html = requests.get('https://www.google.com/search', params=params, headers=headers)
    soup = BeautifulSoup(html.text, 'html.parser')
    matched_images_data = ''.join(
        re.findall(r'AF_initDataCallback\(([^<]+)\);', str(soup.select('script'))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",', matched_images_data_json, )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]', '', str(matched_google_image_data), )
    matched_google_full_resolution_images = re.findall(
        r'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]', removed_matched_google_images_thumbnails, )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, 'ascii').decode(
            'unicode-escape')
        original_size_img = bytes(original_size_img_not_fixed, 'ascii').decode(
            'unicode-escape')
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                'User-Agent',
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
                ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582',
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f'''query_{query.replace(" ", "_")}'''
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f'''{path_name}/original_size_img_{index}.jpg''')
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'{image_count} images were downloaded to disk.')
except IndexError:
print('''Please provide a search term.''')
raise
| 9 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """simple docstring"""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """simple docstring"""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """simple docstring"""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
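# Hedged worked example (moduli invented for illustration):
#   chinese_remainder_theorem(5, 1, 7, 3) -> 31, since 31 % 5 == 1 and 31 % 7 == 3,
#   and 31 is the unique such residue modulo 5 * 7 = 35.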
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
| 41 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute(check_program, result, timeout):
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
            exec_globals = {}
with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"""failed: {e}""" )
# Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException('''Timed out!''')

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
yield
@contextlib.contextmanager
def create_tempdir():
with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
yield dirname
class TimeoutException(Exception):
    pass
class WriteOnlyStringIO(io.StringIO):
    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        return False
class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"
@contextlib.contextmanager
def chdir(root):
if root == ".":
yield
return
    cwd = os.getcwd()
    os.chdir(root)
try:
yield
except BaseException as exc:
raise exc
finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
    builtins.exit = None
    builtins.quit = None
import os
    os.environ["OMP_NUM_THREADS"] = "1"
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
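# A hedged usage sketch (task and program strings are made up for illustration):
#   result = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   result["passed"]  # True when the program ran to completion inside the sandbox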
| 10 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = '''__test_patch_submodule_mock__'''
    with patch_submodule(_test_patching , '''os.path.join''' , mock ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
"""simple docstring"""
assert _test_patching.open is open
    mock = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching , '''open''' , mock ):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_mock__'''
    with patch_submodule(_test_patching , '''pandas.read_csv''' , mock ):
pass
def test_patch_submodule_missing_builtin():
"""simple docstring"""
    mock = '''__test_patch_submodule_missing_builtin_mock__'''
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , '''len''' , None ) is None
    with patch_submodule(_test_patching , '''len''' , mock ):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
"""simple docstring"""
    mock = '''__test_patch_submodule_start_and_stop_mock__'''
    patch = patch_submodule(_test_patching , '''open''' , mock )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = '''__test_patch_submodule_successive_join__'''
    mock_dirname = '''__test_patch_submodule_successive_dirname__'''
    mock_rename = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
        with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching , '''os.rename''' , mock_rename ):
        with patch_submodule(_test_patching , '''os.path.join''' , mock_join ):
            with patch_submodule(_test_patching , '''os.path.dirname''' , mock_dirname ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
"""simple docstring"""
    mock = '''__test_patch_submodule_doesnt_exist_mock__'''
    with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , mock ):
pass
    with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , mock ):
pass
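# Hedged behavior note (restating the assertions above): patch_submodule swaps every
# alias of the target attribute inside the module under test, including renamed imports,
# and restores the originals once the context manager exits.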
| 41 | 0 |
'''simple docstring'''
def actual_power(a: int, b: int):
    """simple docstring"""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """simple docstring"""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
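# A hedged alternative sketch (not from the source): computing the half power once per
# level brings the recursion from O(b) multiplications down to O(log b); this variant
# is restricted to non-negative exponents.
def fast_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # reuse the half power instead of recomputing it
    return half * half if b % 2 == 0 else a * half * half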
| 11 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : Dict ,lowercase__ : int=1_3 ,lowercase__ : List[str]=7 ,lowercase__ : int=True ,lowercase__ : int=True ,lowercase__ : Union[str, Any]=True ,lowercase__ : List[Any]=True ,lowercase__ : str=9_9 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : Union[str, Any]=5 ,lowercase__ : List[Any]=4 ,lowercase__ : str=3_7 ,lowercase__ : Tuple="gelu" ,lowercase__ : List[Any]=0.1 ,lowercase__ : Dict=0.1 ,lowercase__ : int=1_2_8 ,lowercase__ : Dict=3_2 ,lowercase__ : Dict=1_6 ,lowercase__ : Any=2 ,lowercase__ : int=0.0_2 ,lowercase__ : List[str]=3 ,lowercase__ : Dict=4 ,lowercase__ : Optional[int]=None ,):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__lowercase = ids_tensor([self.batch_size] ,self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
return NezhaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,)
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : str ):
__lowercase = NezhaModel(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ ,token_type_ids=lowercase__ )
__lowercase = model(lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Dict ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : Tuple ,lowercase__ : Optional[int] ,lowercase__ : List[Any] ,):
__lowercase = True
__lowercase = NezhaModel(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,encoder_attention_mask=lowercase__ ,)
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,encoder_hidden_states=lowercase__ ,)
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ):
__lowercase = NezhaForMaskedLM(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : int ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
__lowercase = NezhaForNextSentencePrediction(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : str ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ):
__lowercase = NezhaForPreTraining(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,next_sentence_label=lowercase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Tuple ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Union[str, Any] ):
__lowercase = NezhaForQuestionAnswering(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,start_positions=lowercase__ ,end_positions=lowercase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Tuple ,lowercase__ : str ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : Any ,lowercase__ : Optional[int] ,lowercase__ : int ):
__lowercase = self.num_labels
__lowercase = NezhaForSequenceClassification(lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : List[str] ,lowercase__ : int ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Any ,lowercase__ : Optional[Any] ):
__lowercase = self.num_labels
__lowercase = NezhaForTokenClassification(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = model(lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Dict ,lowercase__ : List[Any] ,lowercase__ : str ):
__lowercase = self.num_choices
__lowercase = NezhaForMultipleChoice(config=lowercase__ )
model.to(lowercase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
__lowercase = model(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,labels=lowercase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict['''next_sentence_label'''] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=3_7)
def SCREAMING_SNAKE_CASE ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ):
# This regression test was failing with PyTorch < 1.3
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase = None
self.model_tester.create_and_check_model_as_decoder(
lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase__ )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
__lowercase = True
__lowercase = model_class(config=lowercase__ )
__lowercase = self._prepare_for_class(lowercase__ ,lowercase__ )
__lowercase = torch.jit.trace(
lowercase__ ,(inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase__ ,os.path.join(lowercase__ ,'''bert.pt''' ) )
__lowercase = torch.jit.load(os.path.join(lowercase__ ,'''bert.pt''' ) ,map_location=lowercase__ )
loaded(inputs_dict['''input_ids'''].to(lowercase__ ) ,inputs_dict['''attention_mask'''].to(lowercase__ ) )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
__lowercase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(lowercase__ ,attention_mask=lowercase__ )[0]
__lowercase = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape ,lowercase__ )
__lowercase = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,lowercase__ ,atol=1e-4 ) )
| 41 | 0 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    '''simple docstring'''
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f'{olid} is not a valid Open Library olid'
        raise ValueError(msg)
    return requests.get(f'https://openlibrary.org/{new_olid}.json').json()


def summarize_book(ol_book_data: dict) -> dict:
    '''simple docstring'''
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
        book_summary = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 12 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
super().__init__(lowercase__ ,lowercase__ )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
__lowercase = ''' ,'''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
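
# --- Added usage sketch (not part of the original module). Demonstrates the
# MutableMapping behaviour implemented above: insertion, overwrite, tombstone
# deletion, and automatic resizing past the capacity factor.
def _demo_hash_map() -> None:
	hm = HashMap(initial_block_size=8 )
	for i in range(20 ):  # enough inserts to trigger at least one _size_up()
		hm[F"key-{i}"] = i
	assert len(hm ) == 20
	hm["key-0"] = 100  # overwriting keeps the length unchanged
	assert hm["key-0"] == 100
	del hm["key-1"]  # deletion writes the _deleted tombstone
	assert len(hm ) == 19
	try:
		_ = hm["key-1"]
	except KeyError:
		pass  # expected: deleted keys raise KeyError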
| 41 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = """src/transformers"""

# Matches is_xxx_available()
_re_backend = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(R"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(R"""^\s*else:""")
def find_backend( UpperCAmelCase_ : List[Any] ) -> List[str]:
    if _re_test_backend.search(UpperCAmelCase_ ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(UpperCAmelCase_ )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( UpperCAmelCase_ : Tuple ) -> Union[str, Any]:
    with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    line_index = 0
    while line_index < len(lines ) and not lines[line_index].startswith('_import_structure = {' ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines ):
        return None
    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line ):
            content = _re_one_line_import_struct.search(line ).groups()[0]
            imports = re.findall('\[([^\]]+)\]' , content )
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line )
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(obj ) > 0]
            objects.extend(imports )
        elif line.startswith(' ' * 8 + '"' ):
            objects.append(line[9:-3] )
        line_index += 1
    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING' ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line ) is not None:
                    objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
                elif _re_import_struct_add_many.search(line ) is not None:
                    imports = _re_import_struct_add_many.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_between_brackets.search(line ) is not None:
                    imports = _re_between_brackets.search(line ).groups()[0].split(', ' )
                    imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
                    objects.extend(imports )
                elif _re_quote_object.search(line ) is not None:
                    objects.append(_re_quote_object.search(line ).groups()[0] )
                elif line.startswith(' ' * 8 + '"' ):
                    objects.append(line[9:-3] )
                elif line.startswith(' ' * 12 + '"' ):
                    objects.append(line[13:-3] )
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith('else' )
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', ' ) )
        elif line.startswith(' ' * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines ):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', ' ) )
                elif line.startswith(' ' * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects : Optional[Any] , type_hint_objects : List[Any] ) -> List[Any]:
    def find_duplicates(UpperCAmelCase_ : List[str] ):
        return [k for k, v in collections.Counter(UpperCAmelCase_ ).items() if v > 1]
    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
        duplicate_type_hints = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            name = 'base imports' if key == 'none' else F'{key} backend'
            errors.append(F'Differences for {name}:' )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
    return errors
def check_all_inits( ) -> None:
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
        if "__init__.py" in files:
            fname = os.path.join(root , '__init__.py' )
            objects = parse_init(fname )
            if objects is not None:
                errors = analyze_results(*objects )
                if len(errors ) > 0:
                    errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
                    failures.append('\n'.join(errors ) )
    if len(failures ) > 0:
        raise ValueError('\n\n'.join(failures ) )
def get_transformers_submodules( ) -> Union[str, Any]:
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_' ):
                directories.remove(folder )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path ) / folder).glob('*.py' ) ) ) == 0:
                continue
            short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace(os.path.sep , '.' )
            submodules.append(submodule )
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
            submodule = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
            if len(submodule.split('.' ) ) == 1:
                submodules.append(submodule )
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules( ) -> None:
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers' , os.path.join(PATH_TO_TRANSFORMERS , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered ) > 0:
        list_of_modules = '\n'.join(F'- {module}' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'{list_of_modules}\n'
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
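
# --- Added illustration (a hypothetical mini __init__.py in the lazy-init layout
# that parse_init/analyze_results validate: every object in `_import_structure`
# must reappear under `if TYPE_CHECKING`, per backend). Kept as a string so it is
# documentation, not executed code.
_EXAMPLE_INIT = '''
_import_structure = {
    "configuration_foo": ["FOO_PRETRAINED_CONFIG_ARCHIVE_MAP", "FooConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]

if TYPE_CHECKING:
    from .configuration_foo import FOO_PRETRAINED_CONFIG_ARCHIVE_MAP, FooConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_foo import FooModel
'''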
| 13 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline (ChunkPipeline ):
"""simple docstring"""
def __init__( self : List[str] ,**lowercase__ : Tuple ):
super().__init__(**lowercase__ )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self ,'''vision''' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )
    def __call__( self : List[str] ,image : Union[str, "Image.Image", List[Dict[str, Any]]] ,candidate_labels : Union[str, List[str]] = None ,**kwargs : str ,):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''' )
        if isinstance(image ,(str, Image.Image) ):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs ,**kwargs )
        return results
    def _sanitize_parameters( self : int ,**kwargs : List[Any] ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess( self : Optional[Any] ,inputs : Optional[Any] ):
        image = load_image(inputs['''image'''] )
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels ,str ):
            candidate_labels = candidate_labels.split(''',''' )
        target_size = torch.tensor([[image.height, image.width]] ,dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label ,return_tensors=self.framework )
            image_features = self.image_processor(image ,return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def _forward( self : Any ,model_inputs : List[str] ):
        target_size = model_inputs.pop('''target_size''' )
        candidate_label = model_inputs.pop('''candidate_label''' )
        is_last = model_inputs.pop('''is_last''' )
        outputs = self.model(**model_inputs )
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess( self : Any ,model_outputs : int ,threshold : List[Any]=0.1 ,top_k : List[str]=None ):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output ,threshold=threshold ,target_sizes=model_output['''target_size'''] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0] )
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result )
        results = sorted(results ,key=lambda x : x["score"] ,reverse=True )
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box( self : List[str] ,box : "torch.Tensor" ):
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
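
# --- Added usage sketch (kept commented out; the checkpoint name is an assumption,
# swap in whichever zero-shot detector you use via the `pipeline` factory) ---
# from transformers import pipeline
#
# detector = pipeline(
#     task="zero-shot-object-detection",
#     model="google/owlvit-base-patch32",  # assumed checkpoint
# )
# predictions = detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote control"],
# )
# for pred in predictions:
#     print(pred["label"], pred["score"], pred["box"])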
| 41 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency : int ,samplerate : int ,q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_highpass( frequency : int ,samplerate : int ,q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b0] )
    return filt
def make_bandpass( frequency : int ,samplerate : int ,q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_allpass( frequency : int ,samplerate : int ,q_factor : float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] ,[b0, b1, b2] )
    return filt
def make_peak( frequency : int ,samplerate : int ,gain_db : float ,q_factor : float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_lowshelf( frequency : int ,samplerate : int ,gain_db : float ,q_factor : float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
def make_highshelf( frequency : int ,samplerate : int ,gain_db : float ,q_factor : float = 1 / sqrt(2 ) ,) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] ,[b0, b1, b2] )
    return filt
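
# --- Added usage sketch (assumption: IIRFilter exposes a per-sample `process()`
# method, as in the audio_filters package these helpers come from) ---
def _demo_lowpass() -> None:
    from math import pi

    samplerate = 48_000
    filt = make_lowpass(1_000 , samplerate )  # 1 kHz cutoff biquad
    # Push a short 5 kHz sine through the filter; well above the cutoff,
    # it should come out attenuated relative to the raw signal.
    raw = [sin(2 * pi * 5_000 * n / samplerate ) for n in range(256 )]
    filtered = [filt.process(sample ) for sample in raw]
    assert max(abs(s ) for s in filtered ) < max(abs(s ) for s in raw )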
| 14 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool (PipelineTool ):
"""simple docstring"""
    default_checkpoint = 'facebook/bart-large-mnli'
    description = (
        'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
        'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
        'It returns the most likely label in the list of provided `labels` for the input text.'
    )
    name = 'text_classifier'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['text', ['text']]
    outputs = ['text']
    def setup( self : List[Any] ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('''entail''' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
    def encode( self : int ,text : Dict ,labels : List[Any] ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
    def decode( self : int ,outputs : int ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
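
# --- Added usage sketch (kept commented out; direct instantiation is an assumption,
# agents normally reach this tool through `load_tool`/an Agent) ---
# classifier = TextClassificationTool()
# classifier.setup()  # loads facebook/bart-large-mnli and resolves entailment_id
# label = classifier("This is a super nice API!", labels=["positive", "negative"])
# print(label)  # expected: "positive"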
| 41 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class A ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
A__ = XLMRobertaTokenizer
A__ = XLMRobertaTokenizerFast
A__ = True
A__ = True
    def setUp(self : List[str] ) -> None:
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ (self : Union[str, Any] ) -> str:
"""simple docstring"""
lowercase__ = """<pad>"""
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def lowerCamelCase__ (self : List[Any] ) -> int:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_UpperCAmelCase ) , 1002 )
def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def lowerCamelCase__ (self : Any ) -> Any:
"""simple docstring"""
lowercase__ = XLMRobertaTokenizer(_UpperCAmelCase , keep_accents=_UpperCAmelCase )
lowercase__ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_UpperCAmelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowercase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def lowerCamelCase__ (self : Optional[Any] ) -> str:
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase__ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase__ = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
lowercase__ = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=True
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
# Save tokenizer rust, legacy_format=False
lowercase__ = tempfile.mkdtemp()
lowercase__ = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase )
lowercase__ = tokenizer_p.save_pretrained(_UpperCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase__ = tokenizer_r.from_pretrained(_UpperCAmelCase )
lowercase__ = tokenizer_p.from_pretrained(_UpperCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase ) )
shutil.rmtree(_UpperCAmelCase )
@cached_property
    def big_tokenizer(self : str ) -> str:
"""simple docstring"""
return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" )
def lowerCamelCase__ (self : int ) -> List[str]:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XLMRobertaTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def lowerCamelCase__ (self : Optional[int] ) -> Any:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = """I was born in 92000, and this is falsé."""
lowercase__ = tokenizer.tokenize(_UpperCAmelCase )
lowercase__ = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(_UpperCAmelCase )
lowercase__ = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def lowerCamelCase__ (self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = """Hello World!"""
lowercase__ = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowerCamelCase__ (self : str ) -> Any:
"""simple docstring"""
lowercase__ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
lowercase__ = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def lowerCamelCase__ (self : str ) -> List[str]:
"""simple docstring"""
lowercase__ = {"""input_ids""": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
| 15 |
'''simple docstring'''
from collections.abc import Callable
class Heap :
    """simple docstring"""
    def __init__( self : Optional[int] ,key : Callable | None = None ):
        # Stores actual heap items.
        self.arr = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x : x)
    def _parent( self : Tuple ,i : int ):
        return int((i - 1) / 2 ) if i > 0 else None
    def _left( self : Optional[int] ,i : int ):
        left = int(2 * i + 1 )
        return left if 0 < left < self.size else None
    def _right( self : Optional[Any] ,i : int ):
        right = int(2 * i + 2 )
        return right if 0 < right < self.size else None
    def _swap( self : List[Any] ,i : int ,j : int ):
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]] , self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i] , self.arr[j] = self.arr[j], self.arr[i]
    def _cmp( self : Any ,i : int ,j : int ):
        return self.arr[i][1] < self.arr[j][1]
    def _get_valid_parent( self : int ,i : int ):
        left = self._left(i )
        right = self._right(i )
        valid_parent = i
        if left is not None and not self._cmp(left ,valid_parent ):
            valid_parent = left
        if right is not None and not self._cmp(right ,valid_parent ):
            valid_parent = right
        return valid_parent
    def _heapify_up( self : int ,index : int ):
        parent = self._parent(index )
        while parent is not None and not self._cmp(index ,parent ):
            self._swap(index ,parent )
            index , parent = parent, self._parent(parent )
    def _heapify_down( self : Optional[int] ,index : int ):
        valid_parent = self._get_valid_parent(index )
        while valid_parent != index:
            self._swap(index ,valid_parent )
            index , valid_parent = valid_parent, self._get_valid_parent(valid_parent )
    def update_item( self : Any ,item : int ,item_value : int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value )]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index )
        self._heapify_down(index )
    def delete_item( self : int ,item : int ):
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index )
            self._heapify_down(index )
    def insert_item( self : List[str] ,item : int ,item_value : int ):
        arr_len = len(self.arr )
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value )] )
        else:
            self.arr[self.size] = [item, self.key(item_value )]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1 )
    def get_top( self : List[Any] ):
        return self.arr[0] if self.size else None
    def extract_top( self : Union[str, Any] ):
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0] )
        return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
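
# --- Added usage sketch (not part of the original module). With the default key
# the _cmp above keeps the largest score on top; a negating key flips the order.
def _demo_heap() -> None:
    heap = Heap()
    for item, value in [("a", 3), ("b", 1), ("c", 2)]:
        heap.insert_item(item , value )
    heap.update_item("b" , 10 )  # reposition an existing item after a score change
    top = heap.extract_top()  # returns the stored [item, score] pair
    assert top[0] == "b"

    min_heap = Heap(key=lambda x : -x )  # negate scores to get min-on-top behaviour
    for item, value in [("a", 3), ("b", 1), ("c", 2)]:
        min_heap.insert_item(item , value )
    assert min_heap.get_top()[0] == "b"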
| 41 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input( ):
    compute_environment = _ask_options(
        "In which compute environment are you running?" , ["This machine", "AWS (Amazon SageMaker)"] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser( subparsers : int=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("config" , description=description )
    else:
        parser = argparse.ArgumentParser("Accelerate config command" , description=description )

    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )

    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command( args : int ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file

    if config_file.endswith(".json" ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"accelerate configuration saved at {config_file}" )
def main( ):
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
main()
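
# --- Added usage sketch (kept commented out; programmatic invocation mirroring
# the `accelerate config` CLI with an explicit output path) ---
# parser = config_command_parser()
# args = parser.parse_args(["--config_file", "my_config.yaml"])
# config_command(args)  # still prompts via get_user_input(), then saves to my_config.yaml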
| 16 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback (TrainerCallback ):
    """simple docstring"""
    def __init__( self : List[str] ):
        self.events = []
    def on_init_end( self : Optional[int] ,args : List[str] ,state : Tuple ,control : str ,**kwargs : Any ):
        self.events.append('''on_init_end''' )
    def on_train_begin( self : Any ,args : List[str] ,state : Optional[Any] ,control : int ,**kwargs : Optional[int] ):
        self.events.append('''on_train_begin''' )
    def on_train_end( self : Tuple ,args : Tuple ,state : int ,control : int ,**kwargs : List[str] ):
        self.events.append('''on_train_end''' )
    def on_epoch_begin( self : str ,args : Any ,state : Union[str, Any] ,control : Any ,**kwargs : Optional[Any] ):
        self.events.append('''on_epoch_begin''' )
    def on_epoch_end( self : Tuple ,args : Optional[Any] ,state : int ,control : Any ,**kwargs : Optional[int] ):
        self.events.append('''on_epoch_end''' )
    def on_step_begin( self : int ,args : List[str] ,state : str ,control : List[str] ,**kwargs : List[str] ):
        self.events.append('''on_step_begin''' )
    def on_step_end( self : int ,args : Union[str, Any] ,state : int ,control : Optional[int] ,**kwargs : Dict ):
        self.events.append('''on_step_end''' )
    def on_evaluate( self : str ,args : Any ,state : Tuple ,control : Union[str, Any] ,**kwargs : Any ):
        self.events.append('''on_evaluate''' )
    def on_predict( self : Optional[int] ,args : str ,state : Union[str, Any] ,control : int ,**kwargs : Optional[Any] ):
        self.events.append('''on_predict''' )
    def on_save( self : Optional[Any] ,args : List[str] ,state : Union[str, Any] ,control : Optional[Any] ,**kwargs : int ):
        self.events.append('''on_save''' )
    def on_log( self : Dict ,args : List[str] ,state : Tuple ,control : List[str] ,**kwargs : List[str] ):
        self.events.append('''on_log''' )
    def on_prediction_step( self : int ,args : str ,state : int ,control : Dict ,**kwargs : str ):
        self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def setUp( self : List[str] ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self : Union[str, Any] ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self : List[Any] ,a : Optional[Any]=0 ,b : Any=0 ,train_len : Tuple=6_4 ,eval_len : Optional[int]=6_4 ,callbacks : Optional[Any]=None ,disable_tqdm : str=False ,**kwargs : Any ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a ,b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir ,disable_tqdm=disable_tqdm ,report_to=[] ,**kwargs )
        return Trainer(
            model ,args ,train_dataset=train_dataset ,eval_dataset=eval_dataset ,callbacks=callbacks ,)
    def check_callbacks_equality( self : List[Any] ,cbs1 : Optional[int] ,cbs2 : Any ):
        self.assertEqual(len(cbs1 ) ,len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 ,key=lambda cb : cb.__name__ if isinstance(cb ,type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 ,cbs2 ):
            if isinstance(cb1 ,type ) and isinstance(cb2 ,type ):
                self.assertEqual(cb1 ,cb2 )
            elif isinstance(cb1 ,type ) and not isinstance(cb2 ,type ):
                self.assertEqual(cb1 ,cb2.__class__ )
            elif not isinstance(cb1 ,type ) and isinstance(cb2 ,type ):
                self.assertEqual(cb1.__class__ ,cb2 )
            else:
                self.assertEqual(cb1 ,cb2 )
    def get_expected_events( self : int ,trainer : Union[str, Any] ):
        expected_events = ['''on_init_end''', '''on_train_begin''']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('''on_epoch_begin''' )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('''on_log''' )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('''on_save''' )
            expected_events.append('''on_epoch_end''' )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self : str ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
    def test_add_remove_callback( self : List[Any] ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ ,DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 ,DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 ,cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 ,cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks ,expected_callbacks )
    def test_event_flow( self : Dict ):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='''ignore''' ,category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events ,self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
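
# --- Added illustration (a minimal real-world TrainerCallback, beyond the
# event-recording stub used by the tests above; `max_steps` is a made-up name) ---
class StopAfterNStepsCallback (TrainerCallback ):
    """Hypothetical callback that halts training after `max_steps` optimizer steps."""

    def __init__( self ,max_steps : int = 10 ):
        self.max_steps = max_steps

    def on_step_end( self ,args ,state ,control ,**kwargs ):
        # `state.global_step` counts optimizer steps; flipping this flag asks the
        # Trainer's control flow to stop at the end of the current step.
        if state.global_step >= self.max_steps:
            control.should_training_stop = True
        return control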
| 41 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester ( unittest.TestCase ):
    def __init__( self : Tuple , parent : Optional[Any] , do_resize : bool = True , size : Dict[str, int] = None , size_divisor : int = 32 , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , do_center_crop : bool = True , image_mean : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , do_pad : bool = True , batch_size : Tuple=7 , min_resolution : Tuple=30 , max_resolution : List[str]=400 , num_channels : str=3 , ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"""shortest_edge""": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
    def prepare_image_processor_dict( self : Optional[int] ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
    def get_expected_values( self : str , image_inputs : List[Any] , batched : Any=False ):
        if not batched:
            size = self.size["""shortest_edge"""]
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh , neww = size, scale * w
            else:
                newh , neww = scale * h, size

            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale

            newh , neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height , expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
    def setUp( self : List[str] ):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self )
@property
    def image_processor_dict( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : Tuple ):
__A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
self.assertTrue(hasattr(__A , """size_divisor""" ) )
def lowerCAmelCase_ ( self : Union[str, Any] ):
pass
    def test_call_pil( self : Optional[Any] ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self : int ):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def lowerCAmelCase_ ( self : List[Any] ):
# Initialize image processor
__A : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
__A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__A , __A : str = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__A : int = image_processing(__A , return_tensors="""pt""" ).pixel_values
__A , __A : Optional[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 17 |
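The tester's `get_expected_values` above encodes the resize rule the image processor applies: scale the shortest edge to `size`, cap the longest edge at `int(1333 / 800 * size)`, then floor both dimensions to a multiple of `size_divisor`. A minimal standalone sketch of that rule — the function name and the 288/32 defaults are illustrative assumptions, not values taken from the test suite:

def expected_resize(height: int, width: int, size: int = 288, size_divisor: int = 32):
    scale = size / min(height, width)
    if height < width:
        newh, neww = size, scale * width
    else:
        newh, neww = scale * height, size
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # floor both dimensions to a multiple of size_divisor
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor


print(expected_resize(480, 640))  # (288, 384)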
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
        # 5. controlnet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
| 41 | 0 |
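The ControlNet module above wires plain `flax.linen` convolutions through the usual `init`/`apply` lifecycle. A toy sketch of that pattern, loosely modelled on the conditioning embedding — the module name, channel counts, and input shape are illustrative assumptions, not the model's real configuration:

import jax
import jax.numpy as jnp
import flax.linen as nn


class TinyConditioningEmbedding(nn.Module):
    out_channels: int = 8

    @nn.compact
    def __call__(self, x):
        x = nn.silu(nn.Conv(4, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x))
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)


cond = jnp.zeros((1, 64, 64, 3))  # Flax convolutions expect NHWC layout
module = TinyConditioningEmbedding()
params = module.init(jax.random.PRNGKey(0), cond)
print(module.apply(params, cond).shape)  # (1, 64, 64, 8)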
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    '''Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ... as strings.'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(nth_term):
        series.append(F'''1 / {pow(temp + 1, power)}''' if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
| 18 |
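A quick hedged check of the `p_series` helper above (assumes the fixed signature shown in the snippet); the partial sums of the series converge for p > 1:

print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']
partial_sum = sum(1 / n**2 for n in range(1, 10_001))
print(round(partial_sum, 4))  # 1.6448, approaching pi^2 / 6 ~= 1.6449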
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''ybelkada/fonts'''
def _A ( ):
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
'''Pix2StructImageProcessor. Please upgrade torch.''' )
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
requires_backends(A__ , ['''torch'''] )
_check_torch_version()
__lowercase = image_tensor.unsqueeze(0 )
__lowercase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
__lowercase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
__lowercase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def _A ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Add new lines so that each line is no more than 80 characters.
__lowercase = textwrap.TextWrapper(width=80 )
__lowercase = wrapper.wrap(text=A__ )
__lowercase = '''\n'''.join(A__ )
if font_bytes is not None and font_path is None:
__lowercase = io.BytesIO(A__ )
elif font_path is not None:
__lowercase = font_path
else:
__lowercase = hf_hub_download(A__ , '''Arial.TTF''' )
__lowercase = ImageFont.truetype(A__ , encoding='''UTF-8''' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
__lowercase = ImageDraw.Draw(Image.new('''RGB''' , (1, 1) , A__ ) )
__lowercase , __lowercase , __lowercase , __lowercase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
__lowercase = text_width + left_padding + right_padding
__lowercase = text_height + top_padding + bottom_padding
__lowercase = Image.new('''RGB''' , (image_width, image_height) , A__ )
__lowercase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def _A ( A__ , A__ , **A__ ):
"""simple docstring"""
requires_backends(A__ , '''vision''' )
# Convert to PIL image if necessary
__lowercase = to_pil_image(A__ )
__lowercase = render_text(A__ , **A__ )
__lowercase = max(header_image.width , image.width )
__lowercase = int(image.height * (new_width / image.width) )
__lowercase = int(header_image.height * (new_width / header_image.width) )
__lowercase = Image.new('''RGB''' , (new_width, new_height + new_header_height) , '''white''' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
__lowercase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
__lowercase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = ['flattened_patches']
def __init__( self : Any ,lowercase__ : bool = True ,lowercase__ : bool = True ,lowercase__ : Dict[str, int] = None ,lowercase__ : int = 2_0_4_8 ,lowercase__ : bool = False ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = patch_size if patch_size is not None else {'''height''': 1_6, '''width''': 1_6}
__lowercase = do_normalize
__lowercase = do_convert_rgb
__lowercase = max_patches
__lowercase = is_vqa
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : np.ndarray ,lowercase__ : int ,lowercase__ : dict ,**lowercase__ : Tuple ):
requires_backends(self.extract_flattened_patches ,'''torch''' )
_check_torch_version()
# convert to torch
__lowercase = to_channel_dimension_format(lowercase__ ,ChannelDimension.FIRST )
__lowercase = torch.from_numpy(lowercase__ )
__lowercase , __lowercase = patch_size['''height'''], patch_size['''width''']
__lowercase , __lowercase = get_image_size(lowercase__ )
# maximize scale s.t.
__lowercase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
__lowercase = max(min(math.floor(scale * image_height / patch_height ) ,lowercase__ ) ,1 )
__lowercase = max(min(math.floor(scale * image_width / patch_width ) ,lowercase__ ) ,1 )
__lowercase = max(num_feasible_rows * patch_height ,1 )
__lowercase = max(num_feasible_cols * patch_width ,1 )
__lowercase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) ,size=(resized_height, resized_width) ,mode='''bilinear''' ,align_corners=lowercase__ ,antialias=lowercase__ ,).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
__lowercase = torch_extract_patches(lowercase__ ,lowercase__ ,lowercase__ )
__lowercase = patches.shape
__lowercase = patches_shape[1]
__lowercase = patches_shape[2]
__lowercase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
__lowercase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
__lowercase = torch.arange(lowercase__ ).reshape([rows, 1] ).repeat(1 ,lowercase__ ).reshape([rows * columns, 1] )
__lowercase = torch.arange(lowercase__ ).reshape([1, columns] ).repeat(lowercase__ ,1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
__lowercase = row_ids.to(torch.floataa )
__lowercase = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.cat([row_ids, col_ids, patches] ,-1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
__lowercase = torch.nn.functional.pad(lowercase__ ,[0, 0, 0, max_patches - (rows * columns)] ).float()
__lowercase = to_numpy_array(lowercase__ )
return result
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : np.ndarray ,lowercase__ : Optional[Union[str, ChannelDimension]] = None ,**lowercase__ : List[Any] ):
if image.dtype == np.uinta:
__lowercase = image.astype(np.floataa )
# take mean across the whole `image`
__lowercase = np.mean(lowercase__ )
__lowercase = np.std(lowercase__ )
__lowercase = max(lowercase__ ,1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(lowercase__ ,mean=lowercase__ ,std=lowercase__ ,**lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : ImageInput ,lowercase__ : Optional[str] = None ,lowercase__ : bool = None ,lowercase__ : Optional[bool] = None ,lowercase__ : Optional[int] = None ,lowercase__ : Optional[Dict[str, int]] = None ,lowercase__ : Optional[Union[str, TensorType]] = None ,lowercase__ : ChannelDimension = ChannelDimension.FIRST ,**lowercase__ : List[Any] ,):
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase = patch_size if patch_size is not None else self.patch_size
__lowercase = max_patches if max_patches is not None else self.max_patches
__lowercase = self.is_vqa
if kwargs.get('''data_format''' ,lowercase__ ) is not None:
raise ValueError('''data_format is not an accepted input as the outputs are ''' )
__lowercase = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase = [convert_to_rgb(lowercase__ ) for image in images]
# All transformations expect numpy arrays.
__lowercase = [to_numpy_array(lowercase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('''A header text must be provided for VQA models.''' )
__lowercase = kwargs.pop('''font_bytes''' ,lowercase__ )
__lowercase = kwargs.pop('''font_path''' ,lowercase__ )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = [header_text] * len(lowercase__ )
__lowercase = [
render_header(lowercase__ ,header_text[i] ,font_bytes=lowercase__ ,font_path=lowercase__ )
for i, image in enumerate(lowercase__ )
]
if do_normalize:
__lowercase = [self.normalize(image=lowercase__ ) for image in images]
# convert to torch tensor and permute
__lowercase = [
self.extract_flattened_patches(image=lowercase__ ,max_patches=lowercase__ ,patch_size=lowercase__ )
for image in images
]
# create attention mask in numpy
__lowercase = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
__lowercase = BatchFeature(
data={'''flattened_patches''': images, '''attention_mask''': attention_masks} ,tensor_type=lowercase__ )
return encoded_outputs
| 41 | 0 |
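`torch_extract_patches` above is built on `torch.nn.functional.unfold`. A minimal sketch of what that call produces, with toy sizes — the 8x8 image and 4x4 patches are illustrative, not Pix2Struct's real dimensions:

import torch

image = torch.arange(1 * 3 * 8 * 8, dtype=torch.float32).reshape(1, 3, 8, 8)
patch_height = patch_width = 4
# each column of `unfolded` is one flattened 3x4x4 patch
unfolded = torch.nn.functional.unfold(image, (patch_height, patch_width), stride=(patch_height, patch_width))
print(unfolded.shape)  # torch.Size([1, 48, 4]): 3*4*4 values per patch, 4 patches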
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = 1_60_00 ) -> int:
"""simple docstring"""
_UpperCamelCase = int(round(sample_rate * max_length ) )
if len(__snake_case ) <= sample_length:
return wav
_UpperCamelCase = randint(0, len(__snake_case ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class _UpperCAmelCase:
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Name of a dataset from the datasets package'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'A file containing the training audio paths and labels.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'A file containing the validation audio paths and labels.'} )
lowercase__ = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
lowercase__ = field(
default='validation' , metadata={
'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
lowercase__ = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
lowercase__ = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''} )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowercase__ = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'} )
lowercase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , __a , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''')
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''', __snake_case, __snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase = training_args.get_process_log_level()
logger.setLevel(__snake_case )
transformers.utils.logging.set_verbosity(__snake_case )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
_UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to train from scratch.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset and prepare it for the audio classification task.
_UpperCamelCase = DatasetDict()
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
'''Make sure to set `--label_column_name` to the correct text column - one of '''
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
_UpperCamelCase = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
_UpperCamelCase = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
_UpperCamelCase = feature_extractor.model_input_names[0]
def train_transforms(__snake_case ):
_UpperCamelCase = []
for audio in batch[data_args.audio_column_name]:
_UpperCamelCase = random_subsample(
audio['''array'''], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(__snake_case )
_UpperCamelCase = feature_extractor(__snake_case, sampling_rate=feature_extractor.sampling_rate )
_UpperCamelCase = {model_input_name: inputs.get(__snake_case )}
_UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(__snake_case ):
_UpperCamelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
_UpperCamelCase = feature_extractor(__snake_case, sampling_rate=feature_extractor.sampling_rate )
_UpperCamelCase = {model_input_name: inputs.get(__snake_case )}
_UpperCamelCase = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_UpperCamelCase = raw_datasets['''train'''].features[data_args.label_column_name].names
_UpperCamelCase , _UpperCamelCase = {}, {}
for i, label in enumerate(__snake_case ):
_UpperCamelCase = str(__snake_case )
_UpperCamelCase = label
# Load the accuracy metric from the datasets package
_UpperCamelCase = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(__snake_case ):
_UpperCamelCase = np.argmax(eval_pred.predictions, axis=1 )
return metric.compute(predictions=__snake_case, references=eval_pred.label_ids )
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(__snake_case ), labelaid=__snake_case, idalabel=__snake_case, finetuning_task='''audio-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
_UpperCamelCase = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=__snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
_UpperCamelCase = (
raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(__snake_case, output_all_columns=__snake_case )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
_UpperCamelCase = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(__snake_case, output_all_columns=__snake_case )
# Initialize our trainer
_UpperCamelCase = Trainer(
model=__snake_case, args=__snake_case, train_dataset=raw_datasets['''train'''] if training_args.do_train else None, eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None, compute_metrics=__snake_case, tokenizer=__snake_case, )
# Training
if training_args.do_train:
_UpperCamelCase = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase = last_checkpoint
_UpperCamelCase = trainer.train(resume_from_checkpoint=__snake_case )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCamelCase = trainer.evaluate()
trainer.log_metrics('''eval''', __snake_case )
trainer.save_metrics('''eval''', __snake_case )
# Write model card and (optionally) push to hub
_UpperCamelCase = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__snake_case )
else:
trainer.create_model_card(**__snake_case )
if __name__ == "__main__":
main()
| 19 |
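The training transform above crops each clip with `random_subsample`. The same logic, restated with readable names and a made-up waveform so it can run on its own:

import numpy as np
from random import randint


def random_subsample(wav, max_length, sample_rate=16_000):
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


wav = np.random.randn(16_000 * 30)  # 30 seconds of fake audio
print(len(random_subsample(wav, max_length=20)))  # 320000 samples = 20 s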
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Compute the circular convolution of two signals via a rotation matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 41 | 0 |
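An FFT cross-check for the matrix construction above: circular convolution is the inverse DFT of the element-wise product of the two signals' DFTs, so the result below should match `CircularConvolution().circular_convolution()`:

import numpy as np

first_signal = [2, 1, 2, -1]
second_signal = [1, 2, 3, 4]
fft_result = np.real(np.fft.ifft(np.fft.fft(first_signal) * np.fft.fft(second_signal)))
print([round(x, 2) for x in fft_result])  # [10.0, 10.0, 6.0, 14.0]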
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, psi: int, gamma: int) -> np.ndarray:
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread('../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow('Original', gray)
    imshow('Gabor filter with 20x20 mask and 6 directions', out)
    waitKey(0)
| 20 |
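A small spot check for `gabor_filter_kernel` above (assumes the function is in scope): with psi = 0 the centre value is exp(0) * cos(0) = 1, and an even `ksize` is bumped to the next odd size:

kernel = gabor_filter_kernel(ksize=10, sigma=8, theta=45, lambd=10, psi=0, gamma=0)
print(kernel.shape)            # (11, 11) -- even ksize bumped to odd
print(round(kernel[5, 5], 6))  # 1.0 at the centre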
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 41 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers whose squared
        # absolute value exceeds 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 21 |
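Two spot checks for `get_distance` above (assumes the function is in scope): the origin never escapes, while a point far outside the set diverges on the first step:

print(get_distance(0, 0, max_step=50))  # 1.0 -> inside the set
print(get_distance(2, 2, max_step=50))  # 0.0 -> escapes immediately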
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowerCAmelCase__ = getLogger(__name__)
lowerCAmelCase__ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def _A ( A__ , A__ , A__ , A__ = 8 , A__ = DEFAULT_DEVICE , A__=False , A__="summarization" , A__=None , **A__ , ):
"""simple docstring"""
__lowercase = Path(A__ ).open('''w''' , encoding='''utf-8''' )
__lowercase = str(A__ )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ ).to(A__ )
if fpaa:
__lowercase = model.half()
__lowercase = AutoTokenizer.from_pretrained(A__ )
logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type.
__lowercase = time.time()
# update config with task specific params
use_task_specific_params(A__ , A__ )
if prefix is None:
__lowercase = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
for examples_chunk in tqdm(list(chunks(A__ , A__ ) ) ):
__lowercase = [prefix + text for text in examples_chunk]
__lowercase = tokenizer(A__ , return_tensors='''pt''' , truncation=A__ , padding='''longest''' ).to(A__ )
__lowercase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **A__ , )
__lowercase = tokenizer.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )
for hypothesis in dec:
fout.write(hypothesis + '''\n''' )
fout.flush()
fout.close()
__lowercase = int(time.time() - start_time ) # seconds
__lowercase = len(A__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _A ( ):
"""simple docstring"""
return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def _A ( A__=True ):
"""simple docstring"""
__lowercase = argparse.ArgumentParser()
parser.add_argument('''model_name''' , type=A__ , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
parser.add_argument('''input_path''' , type=A__ , help='''like cnn_dm/test.source''' )
parser.add_argument('''save_path''' , type=A__ , help='''where to save summaries''' )
parser.add_argument('''--reference_path''' , type=A__ , required=A__ , help='''like cnn_dm/test.target''' )
parser.add_argument('''--score_path''' , type=A__ , required=A__ , default='''metrics.json''' , help='''where to save metrics''' )
parser.add_argument('''--device''' , type=A__ , required=A__ , default=A__ , help='''cuda, cuda:1, cpu etc.''' )
parser.add_argument(
'''--prefix''' , type=A__ , required=A__ , default=A__ , help='''will be added to the begininng of src examples''' )
parser.add_argument('''--task''' , type=A__ , default='''summarization''' , help='''used for task_specific_params + metrics''' )
parser.add_argument('''--bs''' , type=A__ , default=8 , required=A__ , help='''batch size''' )
parser.add_argument(
'''--n_obs''' , type=A__ , default=-1 , required=A__ , help='''How many observations. Defaults to all.''' )
parser.add_argument('''--fp16''' , action='''store_true''' )
parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
parser.add_argument(
'''--info''' , nargs='''?''' , type=A__ , const=datetime_now() , help=(
'''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowercase , __lowercase = parser.parse_known_args()
__lowercase = parse_numeric_n_bool_cl_kwargs(A__ )
if parsed_args and verbose:
print(F"parsed the following generate kwargs: {parsed_args}" )
__lowercase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowercase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=A__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
__lowercase = generate_summaries_or_translations(
A__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **A__ , )
if args.reference_path is None:
return {}
# Compute scores
__lowercase = calculate_bleu if '''translation''' in args.task else calculate_rouge
__lowercase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowercase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(A__ )]
__lowercase = score_fn(A__ , A__ )
scores.update(A__ )
if args.dump_args:
scores.update(A__ )
if args.info:
__lowercase = args.info
if verbose:
print(A__ )
if args.score_path is not None:
json.dump(A__ , open(args.score_path , '''w''' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 41 | 0 |
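The script above batches inputs with a `chunks` helper imported from its local `utils` module. A minimal sketch of what such a helper looks like (the exact upstream implementation may differ):

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


print(list(chunks(["a", "b", "c", "d", "e"], 2)))  # [['a', 'b'], ['c', 'd'], ['e']]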
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : str=1e-12 ):
'''simple docstring'''
_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase , axis=1 ) , a_min=UpperCamelCase ) ).T
_a = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(UpperCamelCase , axis=1 ) , a_min=UpperCamelCase ) ).T
return jnp.matmul(UpperCamelCase , norm_emb_a.T )
class A ( nn.Module ):
lowercase_ = 42
lowercase_ = jnp.floataa
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
_a = FlaxCLIPVisionModule(self.config.vision_config )
_a = nn.Dense(self.config.projection_dim , use_bias=lowerCAmelCase_ , dtype=self.dtype )
_a = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) )
_a = self.param(
'''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
_a = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) )
_a = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) )
def __call__( self : Any , lowerCAmelCase_ : int ) -> List[str]:
"""simple docstring"""
_a = self.vision_model(lowerCAmelCase_ )[1]
_a = self.visual_projection(lowerCAmelCase_ )
_a = jax_cosine_distance(lowerCAmelCase_ , self.special_care_embeds )
_a = jax_cosine_distance(lowerCAmelCase_ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
_a = 0.0
_a = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
_a = jnp.round(lowerCAmelCase_ , 3 )
_a = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCAmelCase_ )
# Use a lower threshold if an image has any special care concept
_a = is_special_care * 0.0_1
_a = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
_a = jnp.round(lowerCAmelCase_ , 3 )
_a = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class A ( _a ):
lowercase_ = CLIPConfig
lowercase_ = 'clip_input'
lowercase_ = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Optional[int] , lowerCAmelCase_ : CLIPConfig , lowerCAmelCase_ : Optional[Tuple] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : jnp.dtype = jnp.floataa , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Union[str, Any] , ) -> Optional[Any]:
"""simple docstring"""
if input_shape is None:
_a = (1, 2_24, 2_24, 3)
_a = self.module_class(config=lowerCAmelCase_ , dtype=lowerCAmelCase_ , **lowerCAmelCase_ )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , input_shape=lowerCAmelCase_ , seed=lowerCAmelCase_ , dtype=lowerCAmelCase_ , _do_init=_do_init )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : jax.random.KeyArray , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : FrozenDict = None ) -> FrozenDict:
"""simple docstring"""
_a = jax.random.normal(lowerCAmelCase_ , lowerCAmelCase_ )
_a , _a = jax.random.split(lowerCAmelCase_ )
_a = {'''params''': params_rng, '''dropout''': dropout_rng}
_a = self.module.init(lowerCAmelCase_ , lowerCAmelCase_ )['''params''']
return random_params
def __call__( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : dict = None , ) -> int:
"""simple docstring"""
_a = jnp.transpose(lowerCAmelCase_ , (0, 2, 3, 1) )
return self.module.apply(
{'''params''': params or self.params} , jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , rngs={} , )
| 22 |
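A hedged numeric check of the normalisation in `jax_cosine_distance` above: rows are L2-normalised before the matmul, so the output is a plain cosine-similarity matrix (the embedding values below are made up):

import jax.numpy as jnp

emb_a = jnp.array([[1.0, 0.0], [1.0, 1.0]])
emb_b = jnp.array([[0.0, 2.0]])
norm_a = emb_a / jnp.linalg.norm(emb_a, axis=1, keepdims=True)
norm_b = emb_b / jnp.linalg.norm(emb_b, axis=1, keepdims=True)
print(norm_a @ norm_b.T)  # [[0.], [0.7071]] -> cos(90 deg) and cos(45 deg)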
'''simple docstring'''
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(
    graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int
) -> list[float]:
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 41 | 0 |
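A tiny run of `bellman_ford` above on a made-up three-vertex graph with one negative (but non-cyclic) edge:

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -2},
]
print(bellman_ford(edges, vertex_count=3, edge_count=3, src=0))  # [0.0, 4.0, 2.0]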
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class _a :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=14 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.0_2 , ) -> Dict:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_input_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = rotary_dim
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = initializer_range
UpperCamelCase_ = None
UpperCamelCase_ = vocab_size - 1
UpperCamelCase_ = vocab_size - 1
UpperCamelCase_ = vocab_size - 1
def _UpperCAmelCase ( self ) -> Tuple:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_input_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def _UpperCAmelCase ( self ) -> Optional[Any]:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _UpperCAmelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Tuple:
UpperCamelCase_ = 20
UpperCamelCase_ = model_class_name(_UpperCAmelCase )
UpperCamelCase_ = model.init_cache(input_ids.shape[0] , _UpperCAmelCase )
UpperCamelCase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCamelCase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCamelCase_ = model(
input_ids[:, :-1] , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCamelCase_ = model(
input_ids[:, -1:] , attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=_UpperCAmelCase , )
UpperCamelCase_ = model(_UpperCAmelCase )
UpperCamelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , input_ids , attention_mask ) -> Optional[int]:
        max_decoder_length = 20
        model = model_class_name(config )
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length )
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class _a ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self ) -> None:
        self.model_tester = FlaxGPTJModelTester(self )
    def test_use_cache_forward( self ) -> Any:
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask )
    def test_use_cache_forward_with_attn_mask( self ) -> List[Any]:
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask )
@tooslow
    def test_batch_generation( self ) -> Optional[Any]:
        tokenizer = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
        inputs = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=True , truncation=True )
        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate )
        output_sequences = jit_generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True )
        expected_string = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]
        self.assertListEqual(output_string , expected_string )
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                batch_size , seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname )
                    fx_model_loaded = model_class.from_pretrained(tmpdirname , from_pt=True )
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded , pt_outputs ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers , pt_model_class_name )
                pt_model = pt_model_class(config ).eval()
                fx_model = model_class(config , dtype=jnp.float32 )
                pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
                batch_size , seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(rnd_start_indices ):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs ).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname )
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname , from_flax=True )
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
                self.assertEqual(
                    len(fx_outputs ) , len(pt_outputs_loaded ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(fx_outputs , pt_outputs_loaded ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
    def test_model_from_pretrained( self ) -> Optional[int]:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
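    # Hedged usage sketch mirroring the slow generation test above (checkpoint name
    # taken from the test itself; running this downloads the full GPT-J-6B weights):
    #
    #   model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
    #   sequences = model.generate(inputs["input_ids"], pad_token_id=tokenizer.pad_token_id).sequences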
| 23 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class lowercase_ (YolosImageProcessor ):
    """simple docstring"""
    def __init__( self : List[Any] ,*args : Optional[Any] ,**kwargs : int ):
        warnings.warn(
            '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use YolosImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 41 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( DiffusionPipeline):
    unet : UNet2DModel
    scheduler : ScoreSdeVeScheduler
    def __init__( self , unet , scheduler ) -> None:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size = 1 , num_inference_steps = 2000 , generator = None , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample
            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )
            sample , sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample )
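# Hedged usage sketch: in the released diffusers package this pipeline is exposed
# as ScoreSdeVePipeline, e.g.
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=10).images[0]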
| 24 |
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock ( tmpdir ):
    """simple docstring"""
    locka = FileLock(str(tmpdir / '''foo.lock''' ) )
    lockb = FileLock(str(tmpdir / '''foo.lock''' ) )
    timeout = 0.0_1
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_path ( tmpdir ):
    """simple docstring"""
    filename = '''a''' * 1000 + '''.lock'''
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 255
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
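if __name__ == "__main__":
    # Hedged demo mirroring test_filelock above (creates demo.lock in the current
    # directory): a second handle on a held lock times out rather than blocking.
    with FileLock('''demo.lock''' ).acquire():
        try:
            FileLock('''demo.lock''' ).acquire(0.0_5 )
        except Timeout:
            print('''lock is busy, as expected''' )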
| 41 | 0 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption( parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary( terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports)
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_gpt_bigcode'''] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
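# Hedged usage note: with the _LazyModule indirection above, the torch-heavy
# modules are only imported on first attribute access, e.g.
#
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig  # cheap
#   from transformers.models.gpt_bigcode import GPTBigCodeModel   # triggers the torch import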
| 41 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self : Optional[Any] , value : int ) -> None:
        """simple docstring"""
        self.value = value
        self.left : Node | None = None
        self.right : Node | None = None
class _A :
    def __init__( self : Tuple , tree : Node ) -> None:
        """simple docstring"""
        self.tree = tree
    def depth_first_search( self : str , node : Node | None ) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self : Optional[int] ) -> Iterator[int]:
        """simple docstring"""
        yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
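    # Hedged usage demo: iterating the accumulator yields the sum of node values.
    root = Node(10 )
    root.left = Node(5 )
    root.right = Node(-3 )
    print(list(_A(root ) ) )  # [12]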
| 26 |
'''simple docstring'''
import argparse
import os
import re
PATH_TO_DIFFUSERS = '''src/diffusers'''
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'''\[([^\]]+)\]''')
def get_indent ( line ):
    """simple docstring"""
    search = _re_indent.search(line )
    return "" if search is None else search.groups()[0]
def _A ( A__ , A__="" , A__=None , A__=None ):
"""simple docstring"""
__lowercase = 0
__lowercase = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(A__ ):
index += 1
__lowercase = ['''\n'''.join(lines[:index] )]
else:
__lowercase = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__lowercase = [lines[index]]
index += 1
while index < len(A__ ) and (end_prompt is None or not lines[index].startswith(A__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(A__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(A__ ) )
if index < len(A__ ) - 1:
__lowercase = [lines[index + 1]]
index += 1
else:
__lowercase = []
else:
blocks.append('''\n'''.join(A__ ) )
__lowercase = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(A__ ) > 0:
blocks.append('''\n'''.join(A__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(A__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore ( key ):
    """simple docstring"""
    def _inner(x ):
        return key(x ).lower().replace('''_''' , '''''' )
    return _inner
def sort_objects ( objects , key=None ):
    """simple docstring"""
    def noop(x ):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj ).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj )[0].isupper() and not key(obj ).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj )[0].isupper()]
    keya = ignore_underscore(key )
    return sorted(constants , key=keya ) + sorted(classes , key=keya ) + sorted(functions , key=keya )
def sort_objects_in_import ( import_statement ):
    """simple docstring"""
    def _replace(match ):
        imports = match.groups()[0]
        if "," not in imports:
            return F"[{imports}]"
        keys = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1] ) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(keys )] ) + "]"
    lines = import_statement.split('''\n''' )
    if len(lines ) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '''[''' else 1
        keys_to_sort = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        sorted_indices = sort_objects(keys_to_sort , key=lambda x : x[1] )
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
    elif len(lines ) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1] ) is not None:
            lines[1] = _re_bracket_content.sub(_replace , lines[1] )
        else:
            keys = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1] ) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1] ) + ''', '''.join([F"\"{k}\"" for k in sort_objects(keys )] )
        return "\n".join(lines )
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace , import_statement )
        return import_statement
def sort_imports ( file , check_only=True ):
    """simple docstring"""
    with open(file , '''r''' ) as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1 , len(main_blocks ) - 1 ):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('''\n''' )
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines ) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines )
            else:
                line_idx += 1
        if line_idx >= len(block_lines ):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '''\n'''.join(block_lines[line_idx:-1] )
        indent = get_indent(block_lines[1] )
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code , indent_level=indent )
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys ) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort , key=lambda x : x[1] )]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks ) ):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(sorted_block )
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
    if code != "\n".join(main_blocks ):
        if check_only:
            return True
        else:
            print(F"Overwriting {file}." )
            with open(file , '''w''' ) as f:
                f.write('''\n'''.join(main_blocks ) )
def sort_imports_in_all_inits ( check_only=True ):
    """simple docstring"""
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS ):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root , '''__init__.py''' ) , check_only=check_only )
            if result:
                failures = [os.path.join(root , '''__init__.py''' )]
    if len(failures ) > 0:
        raise ValueError(F"Would overwrite {len(failures )} files, run `make style`." )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCAmelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 41 | 0 |
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = 100 ) -> int:
"""simple docstring"""
_A = 1
_A = 2
for i in range(2 , max_n + 1 ):
_A = pre_numerator
_A = 2 * i // 3 if i % 3 == 0 else 1
_A = cur_numerator
_A = e_cont * pre_numerator + temp
return sum_digits(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f"{solution() = }")
| 27 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase_ (PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(3_2, 6_4, 6_4, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=4 ,out_channels=4 ,down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') ,up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') ,cross_attention_dim=3_2 ,attention_head_dim=4 ,)
        scheduler = DDIMScheduler(
            beta_start=0.0_0_0_8_5 ,beta_end=0.0_1_2 ,beta_schedule='''scaled_linear''' ,clip_sample=False ,set_alpha_to_one=False ,)
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,sample_size=1_2_8 ,)
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 ,eos_token_id=2 ,hidden_size=3_2 ,intermediate_size=3_7 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_0_0_0 ,hidden_act='''gelu''' ,projection_dim=5_1_2 ,)
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
        }
        return components
    def get_dummy_inputs( self ,device : int ,seed : List[str]=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''pt''',
        }
        return inputs
    def test_text_to_video_default_case( self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''output_type'''] = '''np'''
        frames = sd_pipe(**inputs ).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (6_4, 6_4, 3)
        expected_slice = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False ,expected_max_diff=3e-3 )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False ,expected_max_diff=1e-2 )
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_consistent( self ):
        pass
    @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
    def test_inference_batch_single_identical( self ):
        pass
    @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
    def test_num_images_per_prompt( self ):
        pass
    def test_progress_bar( self ):
        return super().test_progress_bar()
@slow
@skip_mps
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
    def test_full_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2_5 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
    def test_two_step_model( self ):
        expected_video = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' )
        pipe = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' )
        pipe = pipe.to('''cuda''' )
        prompt = '''Spiderman is surfing'''
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        video_frames = pipe(prompt ,generator=generator ,num_inference_steps=2 ,output_type='''pt''' ).frames
        video = video_frames.cpu().numpy()
        assert np.abs(expected_video - video ).mean() < 5e-2
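    # Hedged usage sketch mirroring the slow test above (checkpoint name taken from
    # the test; requires a CUDA device and downloads the full model):
    #
    #   pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
    #   frames = pipe("Spiderman is surfing", num_inference_steps=25, output_type="pt").frames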
| 41 | 0 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''segformer'''
    def __init__( self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1E-6, decoder_hidden_size=256, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
    @property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return 12
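# Hedged usage sketch: the defaults above correspond to the MiT-b0 backbone size.
#
#   config = SegformerConfig()
#   config.hidden_sizes  # [32, 64, 160, 256]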
| 28 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_ ( state_dict ):
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _A ( A__ , A__="facebook/mbart-large-en-ro" , A__=False , A__=False ):
"""simple docstring"""
__lowercase = torch.load(A__ , map_location='''cpu''' )['''model''']
remove_ignore_keys_(A__ )
__lowercase = state_dict['''encoder.embed_tokens.weight'''].shape[0]
__lowercase = MBartConfig.from_pretrained(A__ , vocab_size=A__ )
if mbart_aa and finetuned:
__lowercase = '''relu'''
__lowercase = state_dict['''decoder.embed_tokens.weight''']
__lowercase = MBartForConditionalGeneration(A__ )
model.model.load_state_dict(A__ )
if finetuned:
__lowercase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
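# Hedged CLI sketch (paths are placeholders):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned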
| 41 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig ( datasets.BuilderConfig ):
    features: Optional[datasets.Features] = None
def _generate_iterable_examples ( df ,partition_order ,):
import pyspark
def generate_fn():
lowerCamelCase_ = df.select('''*''' ,pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
lowerCamelCase_ = df_with_partition_id.select('''*''' ).where(f"part_id = {partition_id}" ).drop('''part_id''' )
lowerCamelCase_ = partition_df.collect()
lowerCamelCase_ = 0
for row in rows:
yield f"{partition_id}_{row_id}", row.asDict()
row_id += 1
return generate_fn
class SparkExamplesIterable ( _BaseExamplesIterable ):
    def __init__( self , df , partition_order=None , ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions() )
        self.generate_examples_fn = _generate_iterable_examples(self.df , self.partition_order )
    def __iter__( self ):
        yield from self.generate_examples_fn()
    def shuffle_data_sources( self , generator ):
        partition_order = list(range(self.df.rdd.getNumPartitions() ) )
        generator.shuffle(partition_order )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    def shard_data_sources( self , worker_id , num_workers ):
        partition_order = self.split_shard_indices_by_worker(worker_id , num_workers )
        return SparkExamplesIterable(self.df , partition_order=partition_order )
    @property
    def n_shards( self ):
        return len(self.partition_order )
class Spark ( datasets.DatasetBuilder ):
    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__( self , df , cache_dir = None , working_dir = None , **kwargs , ):
        import pyspark
        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir , config_name=str(self.df.semanticHash() ) , **kwargs , )
    def _validate_cache_dir( self ):
        # Returns the path of the created file.
        def create_cache_and_write_probe(context ):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir , exist_ok=True )
            probe_file = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuid4().hex )
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file , '''a''' )
            return [probe_file]
        if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
            return
        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(create_cache_and_write_probe ).collect()
            )
            if os.path.isfile(probe[0] ):
                return
        raise ValueError(
            '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
    def _repartition_df_if_needed( self , max_shard_size ):
        import pyspark
        def get_arrow_batch_size(it ):
            for batch in it:
                yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows )
            .repartition(1 )
            .mapInArrow(get_arrow_batch_size , '''batch_bytes: long''' )
            .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows , int(approx_total_size / max_shard_size ) )
            self.df = self.df.repartition(new_num_partitions )
    def _prepare_split_single( self , fpath , file_format , max_shard_size , ):
        import pyspark
        writer_class = ParquetWriter if file_format == '''parquet''' else ArrowWriter
        working_fpath = os.path.join(self._working_dir , os.path.basename(fpath ) ) if self._working_dir else fpath
        embed_local_files = file_format == '''parquet'''
        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options
        def write_arrow(it ):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it , None )
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            shard_id = 0
            writer = writer_class(
                features=features , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
            table = pa.Table.from_batches([first_batch] )
            writer.write_table(table )
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples , num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features , path=working_fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , writer_batch_size=writer_batch_size , storage_options=storage_options , embed_local_files=embed_local_files , )
                table = pa.Table.from_batches([batch] )
                writer.write_table(table )
            if writer._num_bytes > 0:
                num_examples , num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath ) ):
                    dest = os.path.join(os.path.dirname(fpath ) , os.path.basename(file ) )
                    shutil.move(file , dest )
        stats = (
            self.df.mapInArrow(write_arrow , '''task_id: long, num_examples: long, num_bytes: long''' )
            .groupBy('''task_id''' )
            .agg(
                pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def UpperCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = "arrow" , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ):
self._validate_cache_dir()
lowerCamelCase_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(UpperCAmelCase )
lowerCamelCase_ = not is_remote_filesystem(self._fs )
lowerCamelCase_ = os.path.join if is_local else posixpath.join
lowerCamelCase_ = '''-TTTTT-SSSSS-of-NNNNN'''
lowerCamelCase_ = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
lowerCamelCase_ = path_join(self._output_dir , UpperCAmelCase )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = []
lowerCamelCase_ = []
for task_id, content in self._prepare_split_single(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
(
(
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) , (
lowerCamelCase_
) ,
) = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(UpperCAmelCase )
lowerCamelCase_ = total_num_examples
lowerCamelCase_ = total_num_bytes
# should rename everything at the end
logger.debug(f"Renaming {total_shards} shards." )
if total_shards > 1:
lowerCamelCase_ = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowerCamelCase_ = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ):
rename(
UpperCAmelCase , fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , fpath.replace('''TTTTT-SSSSS''' , f"{global_shard_id:05d}" ).replace('''NNNNN''' , f"{total_shards:05d}" ) , )
lowerCamelCase_ = []
lowerCamelCase_ = 0
for i in range(len(UpperCAmelCase ) ):
lowerCamelCase_ , lowerCamelCase_ = task_id_and_num_shards[i]
for shard_id in range(UpperCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(UpperCAmelCase , len(UpperCAmelCase ) ).map(lambda UpperCAmelCase : _rename_shard(*UpperCAmelCase ) ).collect()
else:
# don't use any pattern
lowerCamelCase_ = 0
lowerCamelCase_ = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f"{shard_id:05d}" ).replace('''TTTTT''' , f"{task_id:05d}" ) , fpath.replace(UpperCAmelCase , '''''' ) , )
    def _get_examples_iterable_for_split( self , split_generator , ):
        return SparkExamplesIterable(self.df )
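# Hedged usage sketch (assumes a live SparkSession named `spark`):
#
#   df = spark.createDataFrame([("a",), ("b",)], "text: string")
#   ds = datasets.Dataset.from_spark(df)  # routes through the builder above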
| 29 |
'''simple docstring'''
import os
from math import log10
def solution ( data_file = "base_exp.txt" ):
    """simple docstring"""
    largest = 0.0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(''',''' ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
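    # Hedged worked example from the problem statement: 2^11 = 2048 is smaller than
    # 3^7 = 2187, and the logarithm comparison used above agrees.
    assert 11 * log10(2 ) < 7 * log10(3 )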
| 41 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir ( files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp('''dset_infos_dir''' )
    if "full:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''---\ndataset_info:\n  dataset_size: 42\n---''' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
            f.write('''''' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
            f.write('''{"default": {"dataset_size": 42}}''' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload ( tmp_path , dataset_info ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , '''dataset_info.json''' ) )
def test_dataset_info_to_yaml_dict ( ):
    '''simple docstring'''
    dataset_info = DatasetInfo(
        description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty ( ):
    '''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload ( tmp_path , dataset_infos_dict ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path , '''README.md''' ) )
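# Hedged round-trip sketch mirroring the tests above (path is a placeholder):
#
#   info = DatasetInfo(description="demo")
#   info.write_to_directory("/tmp/demo_info")
#   assert DatasetInfo.from_directory("/tmp/demo_info") == info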
| 30 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class lowercase_ (PretrainedConfig ):
    """simple docstring"""
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self ,vocab_size=5_0_2_6_5 ,max_position_embeddings=5_1_2 ,encoder_layers=8 ,encoder_ffn_dim=2_0_4_8 ,encoder_attention_heads=1_6 ,decoder_layers=8 ,decoder_ffn_dim=2_0_4_8 ,decoder_attention_heads=1_6 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,use_cache=True ,is_encoder_decoder=True ,activation_function="gelu" ,d_model=5_1_2 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.0_2 ,decoder_start_token_id=1 ,scale_embedding=False ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,forced_eos_token_id=2 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
class lowercase_ (OnnxSeq2SeqConfigWithPast ):
"""simple docstring"""
@property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"past_key_values.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"past_key_values.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
@property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast ,self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"present.{i}.key"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"present.{i}.value"] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**common_inputs ,**decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch ,decoder_past_length )] ,dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers ,num_decoder_layers )
            max_num_layers = max(num_encoder_layers ,num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers ,max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input ,return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self ,tokenizer : PreTrainedTokenizer ,batch_size : int = -1 ,seq_length : int = -1 ,is_pair : bool = False ,framework : Optional[TensorType] = None ,):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
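# Illustrative usage sketch (added example; the enclosing class is truncated in this
# fragment, so the concrete config name is an assumption — the structure matches the
# BART-family seq2seq OnnxConfig classes in transformers):
#
#   onnx_config = BartOnnxConfig(model_config, task="seq2seq-lm")   # hypothetical name
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # ``dummy`` then holds input_ids, attention_mask, decoder_* tensors and, when
#   # ``use_past`` is set, zero-filled past_key_values of the shapes built above.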
| 41 | 0 |
from __future__ import annotations
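# Bitonic sort works in place and, in this classic formulation, assumes the input
# length is a power of two. ``direction`` is 1 for ascending and 0 for descending order.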
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if they violate ``direction``."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence of ``length`` elements starting at ``low``."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``length`` elements starting at ``low`` in place, ascending if direction is 1."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
| 31 |
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Return (x, y) such that a*x + b*y == gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Solve n ≡ r1 (mod n1) and n ≡ r2 (mod n2) for coprime n1 and n2.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Return the multiplicative inverse of a modulo n.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """
    Same solution as chinese_remainder_theorem, built on invert_modulo.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name='''chinese_remainder_theorem''', verbose=True)
testmod(name='''chinese_remainder_theorem2''', verbose=True)
testmod(name='''invert_modulo''', verbose=True)
testmod(name='''extended_euclid''', verbose=True)
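    # Illustrative check (added example, not part of the original doctests):
    # x ≡ 1 (mod 5) and x ≡ 3 (mod 7) has the unique solution x = 31 (mod 35).
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31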
| 41 | 0 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}

PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
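# Illustrative usage (added sketch; requires the ``tokenizers`` backend and, on first
# use, network access to download the checkpoint — the sample sentence is arbitrary):
#
#   tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   ids = tok("Kod jest jak humor.")["input_ids"]
#   assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id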
| 32 |
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
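# These tests exercise ``patch_submodule``: it walks an attribute path reachable from a
# module's globals (e.g. ``os.path.join`` as seen from ``_test_patching``), swaps the
# leaf for a mock while the context manager or start()/stop() pair is active, and
# restores the original object afterwards.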
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open
def test_patch_submodule_missing():
    # patching a submodule that the module doesn't import must not fail
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open
def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 41 | 0 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
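# The tests below build a tiny VideoMAE classifier pipeline and check both the
# single-video and batched call paths against fixed dummy scores.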
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
@require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
@require_tf
    def test_small_model_tf(self):
pass
| 33 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
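# ``NezhaModelTester`` below builds a tiny random config plus matching dummy inputs so
# every Nezha head can be smoke-tested quickly on CPU.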
class NezhaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 41 | 0 |
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
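# The matrix below crosses ZeRO stages (zero2, zero3) with two tiny wav2vec2
# checkpoints; each combination launches ``run_asr.py`` under the deepspeed launcher
# in fp32/fp16 and single-/multi-GPU variants.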
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # build readable parameterized sub-test names, e.g. ``test_..._zero2_base``
    param_based_name = parameterized.to_safe_name('_'.join(str(x) for x in param.args))
    return f'{func.__name__}_{param_based_name}'


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir('./xxx', after=False)
        args = f'\n            --model_name_or_path {model_name}\n            --dataset_name hf-internal-testing/librispeech_asr_dummy\n            --dataset_config_name clean\n            --train_split_name validation\n            --validation_split_name validation\n            --output_dir {output_dir}\n            --num_train_epochs {str(num_train_epochs)}\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 2\n            --evaluation_strategy steps\n            --learning_rate 5e-4\n            --warmup_steps 8\n            --orthography timit\n            --preprocessing_num_workers 1\n            --group_by_length\n            --freeze_feature_extractor\n            --report_to none\n            --save_steps 0\n            --eval_steps {eval_steps}\n            --report_to none\n        '.split()

        if fp16:
            args.extend(['--fp16'])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
        script = [f'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 34 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY):
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int):
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL):
        # Insert into an empty bucket or overwrite the same key; a foreign key
        # means the caller has to probe the next bucket.
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self):
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self):
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int):
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self):
        self._resize(len(self._buckets) * 2)

    def _size_down(self):
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL):
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL):
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self):
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self):
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
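if __name__ == "__main__":
    # Minimal demo (added example): the map behaves like a ``dict`` and grows or
    # shrinks its bucket table automatically as items come and go.
    hm = HashMap(initial_block_size=4)
    for i, key in enumerate("abcdefgh"):
        hm[key] = i
    del hm["a"]
    assert "a" not in hm and hm["b"] == 1
    print(hm)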
| 41 | 0 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, activation_dropout=0.0, attention_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ]
            )

            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ]
            )

        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
@property
    def atol_for_validation(self) -> float:
return 1E-4
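# Illustrative export sketch (added example; assumes the optional ONNX extras are
# installed and network access for the checkpoint):
#
#   config = MarianConfig.from_pretrained("Helsinki-NLP/opus-mt-en-de")
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   # generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH) then yields the
#   # encoder/decoder tensors (and past_key_values) used to trace the model for export.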
| 35 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(F"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, 'vision')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries')

        if isinstance(image, (str, Image.Image)):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs['image'])
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(',')

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        candidate_label = model_inputs.pop('candidate_label')
        is_last = model_inputs.pop('is_last')

        outputs = self.model(**model_inputs)

        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['target_size'])[0]

            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0])

                result = {'score': score, 'label': label, 'box': box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
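# Illustrative usage (added example; downloads an OWL-ViT checkpoint on first use):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "remote control"],
#   )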
| 41 | 0 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    '''Detect whether SageMaker model parallelism is configured for this run.'''
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("""SM_HP_MP_PARAMETERS""" , """{}""" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("""SM_FRAMEWORK_PARAMS""" , """{}""" )
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("""sagemaker_mpi_enabled""" , False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("""smdistributed""" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
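# When SageMaker model parallelism is available, ``smp`` must be initialised once at
# import time, before any of the device/world-size properties below are queried.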
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            """`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use """
            """`TrainingArguments` instead.""" ,FutureWarning ,)
@cached_property
def snake_case_ ( self ):
'''simple docstring'''
logger.info("""PyTorch: setting up devices""" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"""torch.distributed process group is initialized, but local_rank == -1. """
"""In order to use Torch DDP, launch your script with `python -m torch.distributed.launch""" )
if self.no_cuda:
snake_case : Tuple = torch.device("""cpu""" )
snake_case : Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
snake_case : Tuple = smp.local_rank()
snake_case : List[str] = torch.device("""cuda""" ,SCREAMING_SNAKE_CASE_ )
snake_case : List[Any] = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="""smddp""" ,timeout=self.ddp_timeout_delta )
snake_case : Union[str, Any] = int(os.getenv("""SMDATAPARALLEL_LOCAL_RANK""" ) )
snake_case : List[str] = torch.device("""cuda""" ,self.local_rank )
snake_case : Tuple = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case : Any = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case : Optional[int] = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="""nccl""" ,timeout=self.ddp_timeout_delta )
snake_case : Tuple = torch.device("""cuda""" ,self.local_rank )
snake_case : Optional[int] = 1
if device.type == "cuda":
torch.cuda.set_device(SCREAMING_SNAKE_CASE_ )
return device
@property
def snake_case_ ( self ):
'''simple docstring'''
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def snake_case_ ( self ):
'''simple docstring'''
return not is_sagemaker_model_parallel_available()
@property
def snake_case_ ( self ):
'''simple docstring'''
return False
| 36 |
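The availability check above hinges on two JSON blobs that SageMaker injects as environment variables. A self-contained sketch of the same parsing logic, with made-up variable contents standing in for a real SageMaker job:

import json
import os

# Hypothetical values mirroring what a model-parallel SageMaker job would set.
os.environ["SM_HP_MP_PARAMETERS"] = '{"partitions": 2}'
os.environ["SM_FRAMEWORK_PARAMS"] = '{"sagemaker_mpi_enabled": true}'

smp_options = json.loads(os.getenv("SM_HP_MP_PARAMETERS", "{}"))
mpi_options = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
# Model parallelism counts as enabled only when both conditions line up.
print("partitions" in smp_options and mpi_options.get("sagemaker_mpi_enabled", False))  # True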
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli'
SCREAMING_SNAKE_CASE : Optional[Any] = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
SCREAMING_SNAKE_CASE : Any = 'text_classifier'
SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']]
SCREAMING_SNAKE_CASE : List[str] = ['text']
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
super().setup()
__lowercase = self.model.config
__lowercase = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('''entail''' ):
__lowercase = int(lowercase__ )
if self.entailment_id == -1:
raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ):
__lowercase = labels
return self.pre_processor(
[text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,)
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = outputs.logits
__lowercase = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 41 | 0 |
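The tool above wraps an NLI model; the stock zero-shot-classification pipeline exposes the same idea directly. A sketch, assuming the facebook/bart-large-mnli checkpoint can be downloaded:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
# Internally each label becomes a hypothesis such as "This example is positive",
# exactly as in the encode step above.
result = classifier("I loved this movie!", candidate_labels=["positive", "negative"])
print(result["labels"][0])  # most likely label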
from math import ceil
def solution( n = 1_001 ) -> int:
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 37 |
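The closed-form update above adds the four corners of each successive ring of an n x n number spiral (Project Euler 28): the corners of ring i are odd**2, odd**2 - even, odd**2 - 2 * even and odd**2 - 3 * even with odd = 2 * i + 1 and even = 2 * i, which sum to 4 * odd**2 - 6 * even. A quick sanity check against the 5 x 5 spiral, whose diagonals sum to 101:

def spiral_diagonal_sum(n: int) -> int:
    total = 1  # the centre cell
    for i in range(1, (n + 1) // 2):  # one iteration per ring
        odd = 2 * i + 1
        even = 2 * i
        total += 4 * odd**2 - 6 * even
    return total

assert spiral_diagonal_sum(5) == 101
print(spiral_diagonal_sum(1_001))  # 669171001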
'''simple docstring'''
from collections.abc import Callable
class lowercase_ :
"""simple docstring"""
def __init__( self : Optional[int] ,lowercase__ : Callable | None = None ):
# Stores actual heap items.
__lowercase = []
# Stores indexes of each item for supporting updates and deletion.
__lowercase = {}
# Stores current size of heap.
__lowercase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
        __lowercase = key or (lambda lowercase__ : lowercase__)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : int ):
return int((i - 1) / 2 ) if i > 0 else None
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : int ):
__lowercase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : int ,lowercase__ : int ):
__lowercase , __lowercase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
__lowercase , __lowercase = self.arr[j], self.arr[i]
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
return self.arr[i][1] < self.arr[j][1]
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._left(lowercase__ )
__lowercase = self._right(lowercase__ )
__lowercase = i
if left is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = left
if right is not None and not self._cmp(lowercase__ ,lowercase__ ):
__lowercase = right
return valid_parent
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
__lowercase = self._parent(lowercase__ )
while parent is not None and not self._cmp(lowercase__ ,lowercase__ ):
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = parent, self._parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int ):
__lowercase = self._get_valid_parent(lowercase__ )
while valid_parent != index:
self._swap(lowercase__ ,lowercase__ )
__lowercase , __lowercase = valid_parent, self._get_valid_parent(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
__lowercase = [item, self.key(lowercase__ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ):
if item not in self.pos_map:
return
__lowercase = self.pos_map[item]
del self.pos_map[item]
__lowercase = self.arr[self.size - 1]
__lowercase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase__ )
self._heapify_down(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : int ,lowercase__ : int ):
__lowercase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase__ )] )
else:
__lowercase = [item, self.key(lowercase__ )]
__lowercase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
return self.arr[0] if self.size else None
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _A ( ):
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 | 0 |
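The class above is an indexed binary heap: `pos_map` records each item's slot so that priority updates and deletions stay O(log n). Plain heapq has no such index, so the usual workaround is lazy deletion. A small sketch of that contrast:

import heapq

heap = []
latest = {}  # item -> current priority; superseded heap entries are skipped

def push(item, priority):
    latest[item] = priority
    heapq.heappush(heap, (priority, item))

def pop():
    while heap:
        priority, item = heapq.heappop(heap)
        if latest.get(item) == priority:  # ignore stale entries left by updates
            del latest[item]
            return item, priority
    return None

push("a", 5)
push("b", 2)
push("a", 1)  # "update": the old (5, "a") entry is now stale
print(pop())  # ('a', 1)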
'''simple docstring'''
from math import sqrt
def sum_of_divisors( n : int ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution( n : int = 1_00_00 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 38 |
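The two functions above solve Project Euler 21: sum the numbers below 10000 whose proper-divisor sums form an amicable pair, the classic example being d(220) = 284 and d(284) = 220. A quick check with an equivalent divisor-sum helper that pairs each divisor i with its cofactor n // i:

from math import isqrt

def d(n: int) -> int:
    total = 1 if n > 1 else 0  # 1 is a proper divisor of every n > 1
    for i in range(2, isqrt(n) + 1):
        if n % i == 0:
            total += i
            if i != n // i:  # avoid double-counting the square root
                total += n // i
    return total

assert d(220) == 284 and d(284) == 220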
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : str ,**lowercase__ : Any ):
self.events.append('''on_init_end''' )
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : List[str] ,lowercase__ : Optional[Any] ,lowercase__ : int ,**lowercase__ : Optional[int] ):
self.events.append('''on_train_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ,lowercase__ : int ,lowercase__ : int ,**lowercase__ : List[str] ):
self.events.append('''on_train_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : Any ,**lowercase__ : Optional[Any] ):
self.events.append('''on_epoch_begin''' )
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Optional[Any] ,lowercase__ : int ,lowercase__ : Any ,**lowercase__ : Optional[int] ):
self.events.append('''on_epoch_end''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : List[str] ,lowercase__ : str ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_step_begin''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ,lowercase__ : int ,lowercase__ : Optional[int] ,**lowercase__ : Dict ):
self.events.append('''on_step_end''' )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Any ,lowercase__ : Tuple ,lowercase__ : Union[str, Any] ,**lowercase__ : Any ):
self.events.append('''on_evaluate''' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : int ,**lowercase__ : Optional[Any] ):
self.events.append('''on_predict''' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,**lowercase__ : int ):
self.events.append('''on_save''' )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[str] ,lowercase__ : Tuple ,lowercase__ : List[str] ,**lowercase__ : List[str] ):
self.events.append('''on_log''' )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : str ,lowercase__ : int ,lowercase__ : Dict ,**lowercase__ : str ):
self.events.append('''on_prediction_step''' )
@require_torch
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[str] ):
__lowercase = tempfile.mkdtemp()
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
shutil.rmtree(self.output_dir )
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[Any]=0 ,lowercase__ : Any=0 ,lowercase__ : Tuple=6_4 ,lowercase__ : Optional[int]=6_4 ,lowercase__ : Optional[Any]=None ,lowercase__ : str=False ,**lowercase__ : Any ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionDataset(length=lowercase__ )
__lowercase = RegressionModelConfig(a=lowercase__ ,b=lowercase__ )
__lowercase = RegressionPreTrainedModel(lowercase__ )
__lowercase = TrainingArguments(self.output_dir ,disable_tqdm=lowercase__ ,report_to=[] ,**lowercase__ )
return Trainer(
lowercase__ ,lowercase__ ,train_dataset=lowercase__ ,eval_dataset=lowercase__ ,callbacks=lowercase__ ,)
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Optional[int] ,lowercase__ : Any ):
self.assertEqual(len(lowercase__ ) ,len(lowercase__ ) )
# Order doesn't matter
        __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : lowercase__.__name__ if isinstance(lowercase__ ,type ) else lowercase__.__class__.__name__ )
        __lowercase = sorted(lowercase__ ,key=lambda lowercase__ : lowercase__.__name__ if isinstance(lowercase__ ,type ) else lowercase__.__class__.__name__ )
for cba, cba in zip(lowercase__ ,lowercase__ ):
if isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,lowercase__ )
elif isinstance(lowercase__ ,lowercase__ ) and not isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(lowercase__ ,cba.__class__ )
elif not isinstance(lowercase__ ,lowercase__ ) and isinstance(lowercase__ ,lowercase__ ):
self.assertEqual(cba.__class__ ,lowercase__ )
else:
self.assertEqual(lowercase__ ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Union[str, Any] ):
__lowercase = ['''on_init_end''', '''on_train_begin''']
__lowercase = 0
__lowercase = len(trainer.get_eval_dataloader() )
__lowercase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(lowercase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = self.get_trainer()
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# Callbacks passed at init are added to the default callbacks
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowercase = self.get_trainer(disable_tqdm=lowercase__ )
__lowercase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
__lowercase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowercase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(cb.__class__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
# We can also add, pop, or remove by instance
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase__ )
expected_callbacks.remove(lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
__lowercase = self.get_trainer()
__lowercase = trainer.callback_handler.callbacks[0]
__lowercase = trainer.pop_callback(lowercase__ )
self.assertEqual(lowercase__ ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
trainer.add_callback(lowercase__ )
expected_callbacks.insert(0 ,lowercase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks ,lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Dict ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' ,category=lowercase__ )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# Independent log/save/eval
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,logging_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,save_steps=5 )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,eval_steps=5 ,evaluation_strategy='''steps''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
__lowercase = self.get_trainer(callbacks=[MyTestTrainerCallback] ,evaluation_strategy='''epoch''' )
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# A bit of everything
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback] ,logging_steps=3 ,save_steps=1_0 ,eval_steps=5 ,evaluation_strategy='''steps''' ,)
trainer.train()
__lowercase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase__ ,self.get_expected_events(lowercase__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowercase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] ,)
assert str(lowercase__ ) in warn_mock.call_args[0][0]
| 41 | 0 |
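The recorder class above exercises the full TrainerCallback event protocol. A minimal callback of the same shape that one might actually deploy, assuming only the public transformers API:

from transformers import TrainerCallback

class LossLogger(TrainerCallback):
    """Print the training loss every time the Trainer emits a log event."""

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# Injected the same way the test callback is above:
# Trainer(model, training_args, ..., callbacks=[LossLogger()])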
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt (SCREAMING_SNAKE_CASE__ ):
snake_case_ = os.path.join(args.tf_model_dir , '''parameters.json''' )
snake_case_ = json.loads(open(SCREAMING_SNAKE_CASE__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('''.pt''' ):
snake_case_ = args.output + '''.pt'''
snake_case_ = OrderedDict()
with tf.device('''/CPU:0''' ):
snake_case_ = tf.train.load_checkpoint(args.tf_model_dir )
snake_case_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case_ = reader.get_tensor(SCREAMING_SNAKE_CASE__ ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
snake_case_ = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
snake_case_ = 8
snake_case_ = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/moe''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/softmlp/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
snake_case_ = key_name[-9:-7]
for i in range(16 ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
snake_case_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/mlp''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p1/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/kernel''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/p2/bias''' ):
snake_case_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/ln''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.feed_forward.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/att''' ):
snake_case_ = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
snake_case_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case_ = state[:, 0, :, :]
snake_case_ = state[:, 1, :, :]
snake_case_ = state[:, 2, :, :]
snake_case_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
snake_case_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/o/kernel''' ):
snake_case_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
snake_case_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/an''' ):
snake_case_ = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.bias''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.endswith('''/g''' ):
snake_case_ = '''model.blocks.%d.self_attn.norm.weight''' % player
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
snake_case_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
snake_case_ = '''model.%s.weight''' % nlayer
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
if key_name.startswith('''model/wte''' ):
snake_case_ = '''lm_head.weight'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name.startswith('''model/wob''' ):
snake_case_ = '''final_logits_bias'''
snake_case_ = vnp.copy() # same in embedded
snake_case_ = state.reshape((1, -1) )
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense/kernel":
snake_case_ = '''model.last_project.weight'''
snake_case_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
elif key_name == "model/dense_1/bias":
snake_case_ = '''model.last_project.bias'''
snake_case_ = vnp.copy() # same because it is one dimensional
snake_case_ = torch.tensor(SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 39 |
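Every branch of the converter above follows the same recipe: read a TF variable, transpose kernels from (in, out) to the (out, in) layout PyTorch expects, and wrap a contiguous copy in torch.tensor. A distilled sketch of that core move, with a dummy array standing in for a real checkpoint tensor:

import numpy as np
import torch

vnp = np.random.rand(768, 3072).astype(np.float32)  # TF kernel: (in, out)
# .copy() makes the transposed view contiguous before handing it to torch.
weight = torch.tensor(vnp.transpose([1, 0]).copy())  # PT weight: (out, in)
print(weight.shape)  # torch.Size([3072, 768])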
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : jnp.ndarray
SCREAMING_SNAKE_CASE : jnp.ndarray
class lowercase_ (nn.Module ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE ( self : Dict ):
__lowercase = nn.Conv(
self.block_out_channels[0] ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__lowercase = []
for i in range(len(self.block_out_channels ) - 1 ):
__lowercase = self.block_out_channels[i]
__lowercase = self.block_out_channels[i + 1]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
blocks.append(lowercase__ )
__lowercase = blocks
__lowercase = nn.Conv(
self.conditioning_embedding_channels ,kernel_size=(3, 3) ,padding=((1, 1), (1, 1)) ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowercase__ : Optional[int] ):
__lowercase = self.conv_in(lowercase__ )
__lowercase = nn.silu(lowercase__ )
for block in self.blocks:
__lowercase = block(lowercase__ )
__lowercase = nn.silu(lowercase__ )
__lowercase = self.conv_out(lowercase__ )
return embedding
@flax_register_to_config
class lowercase_ (nn.Module , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 3_2
SCREAMING_SNAKE_CASE : int = 4
SCREAMING_SNAKE_CASE : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
SCREAMING_SNAKE_CASE : int = 2
SCREAMING_SNAKE_CASE : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE : int = 1_2_8_0
SCREAMING_SNAKE_CASE : float = 0.0
SCREAMING_SNAKE_CASE : bool = False
SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE : bool = True
SCREAMING_SNAKE_CASE : int = 0
SCREAMING_SNAKE_CASE : str = "rgb"
SCREAMING_SNAKE_CASE : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : jax.random.KeyArray ):
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase = jnp.ones((1,) ,dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
__lowercase = (1, 3, self.sample_size * 8, self.sample_size * 8)
__lowercase = jnp.zeros(lowercase__ ,dtype=jnp.floataa )
__lowercase , __lowercase = jax.random.split(lowercase__ )
__lowercase = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )["params"]
def SCREAMING_SNAKE_CASE ( self : Any ):
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(lowercase__ ,dtype=self.dtype )
__lowercase = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] ,block_out_channels=self.conditioning_embedding_out_channels ,)
__lowercase = self.only_cross_attention
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase__ ,lowercase__ ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = []
__lowercase = block_out_channels[0]
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(lowercase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,dtype=self.dtype ,)
else:
__lowercase = FlaxDownBlockaD(
in_channels=lowercase__ ,out_channels=lowercase__ ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(lowercase__ )
for _ in range(self.layers_per_block ):
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
if not is_final_block:
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
controlnet_down_blocks.append(lowercase__ )
__lowercase = down_blocks
__lowercase = controlnet_down_blocks
# mid
__lowercase = block_out_channels[-1]
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowercase__ ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,dtype=self.dtype ,)
__lowercase = nn.Conv(
lowercase__ ,kernel_size=(1, 1) ,padding='''VALID''' ,kernel_init=nn.initializers.zeros_init() ,bias_init=nn.initializers.zeros_init() ,dtype=self.dtype ,)
def __call__( self : Optional[Any] ,lowercase__ : List[str] ,lowercase__ : Any ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : float = 1.0 ,lowercase__ : bool = True ,lowercase__ : bool = False ,):
__lowercase = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__lowercase = jnp.flip(lowercase__ ,axis=1 )
# 1. time
if not isinstance(lowercase__ ,jnp.ndarray ):
__lowercase = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(lowercase__ ,jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(lowercase__ ,0 )
__lowercase = self.time_proj(lowercase__ )
__lowercase = self.time_embedding(lowercase__ )
# 2. pre-process
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.conv_in(lowercase__ )
__lowercase = jnp.transpose(lowercase__ ,(0, 2, 3, 1) )
__lowercase = self.controlnet_cond_embedding(lowercase__ )
sample += controlnet_cond
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase__ ,lowercase__ ):
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
else:
__lowercase , __lowercase = down_block(lowercase__ ,lowercase__ ,deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__lowercase = self.mid_block(lowercase__ ,lowercase__ ,lowercase__ ,deterministic=not train )
# 5. contronet blocks
__lowercase = ()
for down_block_res_sample, controlnet_block in zip(lowercase__ ,self.controlnet_down_blocks ):
__lowercase = controlnet_block(lowercase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
__lowercase = controlnet_down_block_res_samples
__lowercase = self.controlnet_mid_block(lowercase__ )
# 6. scaling
__lowercase = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowercase__ ,mid_block_res_sample=lowercase__ )
| 41 | 0 |
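The final scaling step in the ControlNet __call__ above is an elementwise multiply over the residual stack before it is handed to the UNet. A tiny JAX sketch of that contract, with shapes invented for illustration:

import jax.numpy as jnp

down_block_res_samples = [jnp.ones((1, 8, 8, 320)), jnp.ones((1, 4, 4, 640))]
mid_block_res_sample = jnp.ones((1, 2, 2, 1280))

conditioning_scale = 0.5
down_block_res_samples = [s * conditioning_scale for s in down_block_res_samples]
mid_block_res_sample = mid_block_res_sample * conditioning_scale
print(float(mid_block_res_sample[0, 0, 0, 0]))  # 0.5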