import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        # Converted models are written to a throwaway directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
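As a usage note, the @slow-decorated tests above are skipped by default. A minimal sketch of opting in follows; it assumes the suite gates slow tests on the RUN_SLOW environment variable, as the transformers test suite does, and the test-file path is hypothetical:

import os

import pytest

os.environ["RUN_SLOW"] = "1"  # assumption: @slow tests are gated on this variable
pytest.main(["-q", "tests/models/marian/test_tatoeba_conversion.py"])  # hypothetical path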
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided dice and Colin rolls six six-sided dice; find the
probability, rounded to seven decimal places, that Peter's total beats Colin's.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins whenever Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
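As an added sanity check, not part of the original solution, a quick Monte Carlo replay of the same game should hover around the exact value returned by solution(), roughly 0.573:

import random


def monte_carlo_estimate(trials: int = 100_000) -> float:
    # Replay the game directly: Peter rolls nine 4-sided dice, Colin six 6-sided dice.
    wins = 0
    for _ in range(trials):
        peter_total = sum(random.randint(1, 4) for _ in range(9))
        colin_total = sum(random.randint(1, 6) for _ in range(6))
        wins += peter_total > colin_total
    return wins / trials


if __name__ == "__main__":
    print(f"Monte Carlo estimate: {monte_carlo_estimate():.4f}")  # should land near 0.573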
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
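For context, a hypothetical command-line invocation of this script; the task name, paths, and hyperparameters below are placeholders, not values from the original:

# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path roberta-base \
#     --data_dir ./data/swag \
#     --output_dir ./output \
#     --do_train --do_eval \
#     --max_seq_length 128 \
#     --per_device_train_batch_size 16 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 3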
from manim import *


class Stage1(Scene):
    def construct(self):
        # Basic memory-cell rectangles used to build the CPU/GPU/Model diagrams.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu, run_time=1),
            Create(gpu, run_time=1),
            Create(model, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            # Shrink each model cell and slide it onto the CPU column.
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
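A rendering sketch for the scene above; the module filename and the scene name Stage1 are assumptions from the reconstruction, not confirmed by the source:

# manim -pql stage_1.py Stage1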
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
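The try/except above is the standard optional-dependency guard: when torch or transformers is missing, dummy placeholder objects are exported so the import itself never fails. A minimal sketch of the same pattern follows; the package name is hypothetical:

import importlib.util


def _backend_available(package: str) -> bool:
    # True when the package can be imported in this environment.
    return importlib.util.find_spec(package) is not None


if _backend_available("some_backend"):  # hypothetical dependency name
    pass  # import the real implementations here
else:

    class _MissingBackendStub:
        """Stands in for the real class; fails only when actually used."""

        def __init__(self, *args, **kwargs):
            raise ImportError("This feature requires the optional backend to be installed.")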
import builtins
import sys

from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP


in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class Menu:
    """A CLI menu to select a choice from a list of choices using the keyboard."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Moves the cursor a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        # Jump straight to a choice when a number key is pressed.
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the selected index"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
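A usage sketch for the class above; the class name Menu comes from the reconstruction and the choices are placeholders:

# menu = Menu(prompt="Pick a compute environment:", choices=["This machine", "AWS (Amazon SageMaker)"])
# selected_index = menu.run(default_choice=0)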
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
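A short usage sketch, assuming the names above match the transformers API: the attribute_map lets generic names resolve to this model's own fields.

# from transformers import Swin2SRConfig
#
# config = Swin2SRConfig(upscale=4)
# assert config.hidden_size == config.embed_dim == 180  # mapped via attribute_map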
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
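A hypothetical invocation of the converter above; all file paths are placeholders:

# python convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./wav2vec_small_960h.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-base-960h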
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file for a new release."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
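For orientation, an assumption inferred from the parsing logic above, not confirmed by the source: the custom.js file is expected to contain lines shaped roughly like the following, so the loops can find the stable-version line and append a new entry just before the closing brace of versionMapping.

# const stableVersion = "v4.29.0"
# const versionMapping = {
#     "": "v4.29.0 (stable)",
#     "v4.28.0": "v4.28.0",
# }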
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
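A usage sketch, assuming the names above match the transformers API:

# from transformers import DecisionTransformerConfig
#
# config = DecisionTransformerConfig(state_dim=11, act_dim=3)
# assert config.max_position_embeddings == config.n_positions  # via attribute_map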
import unittest

from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )


class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
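For orientation, the position-id scheme the two position-id tests assert, in miniature: non-padding tokens receive consecutive positions starting at padding_idx + 1, while padding positions stay at padding_idx.

# input_ids    = [[12,      31,      13,      pad]]
# position_ids = [[pad + 1, pad + 2, pad + 3, pad]]   # pad = model.padding_idx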
import argparse
import gc
import json
import os
import shutil
import warnings

import torch

from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer


try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)


def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))

    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)

    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)

            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))

    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }

    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))

    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))

    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)


def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)


if __name__ == "__main__":
    main()
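A hypothetical invocation of the conversion script above; the paths are placeholders:

# python convert_llama_weights_to_hf.py \
#     --input_dir ./llama_raw --model_size 7B --output_dir ./llama-7b-hf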
"""simple docstring"""
import numpy as np
def a ( __snake_case : np.array ):
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
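An added doctest-style check: the expression 2 / (1 + e^(-2x)) - 1 is algebraically identical to np.tanh, so the two should agree elementwise.

# >>> import numpy as np
# >>> v = np.array([-1.0, 0.0, 1.0])
# >>> bool(np.allclose(tangent_hyperbolic(v), np.tanh(v)))
# True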
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _lowercase , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_lowercase )
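# A hedged usage sketch (my addition, not part of the test suite above): driving the
# custom-timesteps API these tests exercise. `DDPMScheduler` is assumed to come from
# `diffusers`, and the zero tensor stands in for a real UNet noise prediction.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in model output
    sample = scheduler.step(noise_pred, t, sample, generator=torch.manual_seed(0)).prev_sample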
| 5 | 0 |
'''simple docstring'''
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
def solution( colours_picked : int = 2_0 ) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS , colours_picked )
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR , colours_picked )
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
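# Hedged cross-check (my addition): a small Monte Carlo simulation of the same urn
# experiment. `simulate` and `trials` are illustrative names; with enough trials the
# empirical mean should approach float(solution(20)) ~= 6.818741802.
import random

def simulate(trials: int = 10_000, picked: int = 20) -> float:
    balls = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    distinct_total = 0
    for _ in range(trials):
        distinct_total += len(set(random.sample(balls, picked)))  # distinct colours drawn
    return distinct_total / trials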
| 211 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments :
'''simple docstring'''
    model_type : str = field(
        default=None , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(MODEL_TYPES )} )
    data_dir : str = field(
        default=None , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
    max_seq_length : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
    doc_stride : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
    max_query_length : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
    max_answer_length : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
    version_2_with_negative : bool = field(
        default=False , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
    null_score_diff_threshold : float = field(
        default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    n_best_size : int = field(
        default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
    lang_id : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
    threads : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class Split ( Enum ):
    '''simple docstring'''
    train = '''train'''
    dev = '''dev'''
class SquadDataset ( Dataset ):
    '''simple docstring'''
    args : SquadDataTrainingArguments
    features : List[SquadFeatures]
    mode : Split
    is_language_sensitive : bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ):
"""simple docstring"""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""" , None )
                self.examples = self.old_features.get("""examples""" , None )
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , cached_features_file , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
    def __getitem__( self , i ):
        """simple docstring"""
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
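# Hedged usage sketch (my addition): wiring the pieces above together for evaluation.
# The data path is illustrative, and building the dataset needs SQuAD json files on
# disk, so that step is left commented.
from transformers import AutoTokenizer

example_args = SquadDataTrainingArguments(model_type="bert", data_dir="path/to/squad")  # hypothetical path
example_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
# eval_dataset = SquadDataset(example_args, example_tokenizer, mode=Split.dev)
# print(len(eval_dataset), eval_dataset[0]["input_ids"].shape)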
| 5 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def UpperCAmelCase__ ( self : Optional[int]):
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''albert-base-v2''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Dict):
        model = FlaxAlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids , attention_mask=attention_mask)[0]
        expected_shape = (1, 1_1, 7_6_8)
        self.assertEqual(output.shape , expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4))
| 171 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type : str = '''dpr'''
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
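# Hedged usage sketch (my addition): instantiating the config above with its defaults
# and a non-zero projection head, the one DPR-specific field beyond the BERT-style ones.
example_config = DPRConfig(projection_dim=128)
assert example_config.model_type == "dpr" and example_config.projection_dim == 128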
| 5 | 0 |
"""simple docstring"""
from itertools import product
def total_frequency_distribution( sides_number: int, dice_number: int ) -> list[int]:
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1 )
    for dice_numbers in product(face_numbers, repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution( ) -> float:
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
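# Hedged sanity check (my addition): the widely published answer to this problem
# (Project Euler 205) is 0.5731441, so the rounded probability can be asserted directly.
assert solution() == 0.5731441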
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 5 | 0 |
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
        ''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode('''utf-8''' )
    status = json.loads(o )
    runners = status['''runners''']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('''offline_runners.txt''' , '''w''' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '''\n'''.join([x['''name'''] for x in offline_runners] )
        raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split(''',''' )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
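# Hedged local test (my addition): exercising the same filtering logic on a canned
# payload instead of the live GitHub API. The runner names are made up.
example_status = {
    "runners": [
        {"name": "gpu-runner-1", "status": "online"},
        {"name": "gpu-runner-2", "status": "offline"},
    ]
}
offline = [r["name"] for r in example_status["runners"] if r["status"] == "offline"]
assert offline == ["gpu-runner-2"]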
| 203 |
'''simple docstring'''
def triangle_number_generator ():
    for n in range(1 , 1000000 ):
        yield n * (n + 1) // 2
def count_divisors (n :int ):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
def solution ():
    return next(i for i in triangle_number_generator() if count_divisors(i ) > 500 )
if __name__ == "__main__":
print(solution())
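# Hedged worked example (my addition): 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors
# (1, 2, 4, 7, 14, 28), and it is the first triangle number with more than five of them.
assert count_divisors(28) == 6
assert next(i for i in triangle_number_generator() if count_divisors(i) > 5) == 28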
| 5 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE__ = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def lowercase ( a , a , a=None , a=None , a=None , a=None , a=None , a=None , ):
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ :List[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
SCREAMING_SNAKE_CASE_ :int = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ :Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ :Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64)) , -1)
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1E-3 , msg=F"Max diff is {diff}")
@require_flax
class FlaxBlenderbotHeadTests ( unittest.TestCase ):
    vocab_size = 9_9
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ):
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape)
    def test_lm_uneven_forward( self ):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        input_ids = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64)
        outputs = lm_model(input_ids=input_ids , decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape , expected_shape)
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64)
        shifted = shift_tokens_right(input_ids , 1 , 2)
        n_pad_before = np.equal(input_ids , 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted , 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape , input_ids.shape)
        self.assertEqual(n_pad_after , n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class FlaxBlenderbotModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    is_encoder_decoder = True
    all_model_classes = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict)
    def test_use_cache_forward_with_attn_mask( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict)
def _snake_case ( self : int):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
SCREAMING_SNAKE_CASE_ :List[str] = self._prepare_for_class(_lowercase , _lowercase)
SCREAMING_SNAKE_CASE_ :int = model_class(_lowercase)
@jax.jit
def encode_jitted(UpperCAmelCase : str , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : Dict):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase)
with self.subTest("JIT Enabled"):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = encode_jitted(**_lowercase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ :List[Any] = encode_jitted(**_lowercase).to_tuple()
self.assertEqual(len(_lowercase) , len(_lowercase))
for jitted_output, output in zip(_lowercase , _lowercase):
self.assertEqual(jitted_output.shape , output.shape)
def _snake_case ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
SCREAMING_SNAKE_CASE_ :List[Any] = model_class(_lowercase)
SCREAMING_SNAKE_CASE_ :List[Any] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"])
SCREAMING_SNAKE_CASE_ :Optional[Any] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest("JIT Enabled"):
SCREAMING_SNAKE_CASE_ :Optional[int] = decode_jitted(**_lowercase).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ :str = decode_jitted(**_lowercase).to_tuple()
self.assertEqual(len(_lowercase) , len(_lowercase))
for jitted_output, output in zip(_lowercase , _lowercase):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def _snake_case ( self : Optional[Any]):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
SCREAMING_SNAKE_CASE_ :List[str] = np.ones((1, 1)) * model.config.eos_token_id
SCREAMING_SNAKE_CASE_ :List[Any] = model(_lowercase)
self.assertIsNotNone(_lowercase)
@unittest.skipUnless(jax_device != "cpu" , "3B test too slow on CPU.")
@slow
def _snake_case ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ :Optional[Any] = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
SCREAMING_SNAKE_CASE_ :List[Any] = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
SCREAMING_SNAKE_CASE_ :int = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B" , from_pt=_lowercase)
SCREAMING_SNAKE_CASE_ :str = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ["Sam"]
SCREAMING_SNAKE_CASE_ :str = tokenizer(_lowercase , return_tensors="jax")
SCREAMING_SNAKE_CASE_ :Tuple = model.generate(**_lowercase , **_lowercase)
SCREAMING_SNAKE_CASE_ :List[str] = "Sam is a great name. It means \"sun\" in Gaelic."
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tokenizer.batch_decode(_lowercase , **_lowercase)
assert generated_txt[0].strip() == tgt_text
| 631 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class DonutFeatureExtractor ( DonutImageProcessor ):
'''simple docstring'''
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 5 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder(root):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder(root):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder(root):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height(root):
    return (max(height(root.left ), height(root.right ) ) + 1) if root else 0
def level_order(root):
    output = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right(root, level):
    output = []
    def populate_output(root, level) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left, level - 1 )
            populate_output(root.right, level - 1 )
    populate_output(root, level )
    return output
def get_nodes_from_right_to_left(root, level):
    output = []
    def populate_output(root, level) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right, level - 1 )
            populate_output(root.left, level - 1 )
    populate_output(root, level )
    return output
def zigzag(root):
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root )
    for h in range(1, height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h ) )
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(F'''In-order Traversal: {inorder(tree )}''' )
    print(F'''Pre-order Traversal: {preorder(tree )}''' )
    print(F'''Post-order Traversal: {postorder(tree )}''', '\n' )
    print(F'''Height of Tree: {height(tree )}''', '\n' )
    print('Complete Level Order Traversal: ' )
    print(level_order(tree ), '\n' )
    print('Level-wise order Traversal: ' )
    for level in range(1, height(tree ) + 1 ):
        print(F'''Level {level}:''', get_nodes_from_left_to_right(tree , level=level ) )
    print('\nZigZag order Traversal: ' )
    print(zigzag(tree ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
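# Hedged quick checks (my addition): expected results for the fixed sample tree built
# by make_tree() -- 1 at the root, 2 and 3 as its children, 4 and 5 under 2.
assert preorder(make_tree()) == [1, 2, 4, 5, 3]
assert inorder(make_tree()) == [4, 2, 5, 1, 3]
assert level_order(make_tree()) == [1, 2, 3, 4, 5]
assert height(make_tree()) == 3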
| 101 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 333 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
_lowercase = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
_lowercase = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
_lowercase = BeautifulSoup(res.text, """html.parser""")
_lowercase = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F"""https://google.com{link.get('href')}""")
| 5 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self):
'''simple docstring'''
super().setUp()
        vocab_tokens = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer( self, **kwargs):
        '''simple docstring'''
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer( self, **kwargs):
        '''simple docstring'''
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts( self, tokenizer):
        '''simple docstring'''
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self):
        '''simple docstring'''
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids( self):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 500 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main ():
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/diffusers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type : str = '''vision-encoder-decoder'''
    is_composition : bool = True
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuration of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        encoder_config = kwargs.pop('encoder' )
        encoder_model_type = encoder_config.pop('model_type' )
        decoder_config = kwargs.pop('decoder' )
        decoder_model_type = decoder_config.pop('model_type' )
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
@classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ):
        """simple docstring"""
        logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
    def inputs( self ):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1e-4
@property
    def outputs( self ):
"""simple docstring"""
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
        """simple docstring"""
        common_inputs = OrderedDict()
        common_inputs['input_ids'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        common_inputs['encoder_hidden_states'] = {0: 'batch', 1: 'encoder_sequence'}
        return common_inputs
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        """simple docstring"""
        import torch
        common_inputs = OrderedDict()
        dummy_input = super().generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        batch , encoder_sequence = dummy_input['input_ids'].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs['input_ids'] = dummy_input.pop('input_ids' )
        common_inputs['attention_mask'] = dummy_input.pop('attention_mask' )
        common_inputs['encoder_hidden_states'] = torch.zeros(encoder_hidden_states_shape )
        return common_inputs
class VisionEncoderDecoderOnnxConfig ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
"""simple docstring"""
pass
    def get_encoder_config( self , encoder_config ):
        """simple docstring"""
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config )
    def get_decoder_config( self , encoder_config , decoder_config , feature = "default" ):
        """simple docstring"""
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config , feature )
| 517 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
import os
from collections.abc import Iterator
def UpperCamelCase (lowercase_: str = "." ) -> Union[str, Any]:
for dir_path, dir_names, filenames in os.walk(__lowerCamelCase ):
A__ : Dict = [d for d in dir_names if d != """scripts""" and d[0] not in """._"""]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(__lowerCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(__lowerCamelCase , __lowerCamelCase ).lstrip("""./""" )
def md_prefix(i: int) -> str:
return f"""{i * ' '}*""" if i else "\n##"
def print_path(old_path: str , new_path: str) -> str:
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f"""{md_prefix(i )} {new_part.replace('_' , ' ' ).title()}""" )
    return new_path
def UpperCamelCase (lowercase_: str = "." ) -> int:
A__ : Dict = """"""
for filepath in sorted(good_file_paths(__lowerCamelCase ) ):
A__ , A__ : Any = os.path.split(__lowerCamelCase )
if filepath != old_path:
A__ : str = print_path(__lowerCamelCase , __lowerCamelCase )
A__ : Dict = (filepath.count(os.sep ) + 1) if filepath else 0
A__ : Dict = f"""{filepath}/{filename}""".replace(""" """ , """%20""" )
A__ : Dict = os.path.splitext(filename.replace("""_""" , """ """ ).title() )[0]
print(f"""{md_prefix(__lowerCamelCase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
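# Hedged illustration (my addition): md_prefix renders depth 0 as a markdown heading
# and deeper levels as indented bullets, which shapes the generated DIRECTORY.md.
assert md_prefix(0) == "\n##"
assert md_prefix(1).endswith("*")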
| 456 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_mae"""] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_vit_mae"""] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
def z_function(input_str: str) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
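# Added usage sketch: hand-computed checks for z_function and find_pattern above,
# documenting the convention that index 0 of the Z-array stays 0.
assert z_function("abracadabra") == [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
assert find_pattern("abr", "abracadabra") == 2  # matches at text indices 0 and 7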
| 608 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_lowercase ):
"""simple docstring"""
super().__init__(**_lowercase )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
    def __call__( self , audios , **kwargs ):
        """simple docstring"""
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        """simple docstring"""
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
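if __name__ == "__main__":
    # Added usage sketch. The task name "zero-shot-audio-classification" and the
    # checkpoint "laion/clap-htsat-unfused" are assumptions, not taken from this file.
    from transformers import pipeline
    classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at 48 kHz
    print(classifier(audio, candidate_labels=["dog barking", "engine idling"]))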
| 5 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    """simple docstring"""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention( idx , cnt ):
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token( idx ):
    """simple docstring"""
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', "stage2.cls_token") )
return token
def final( ):
    """simple docstring"""
    head = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder ):
    """simple docstring"""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_0_0_0
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type="dataset" ) ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels , idalabel=idalabel , labelaid=labelaid )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 1_0]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 1_6]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 2_0]
        config.num_heads = [3, 1_2, 1_6]
        config.embed_dim = [1_9_2, 7_6_8, 1_0_2_4]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device("cpu" ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder )
    image_processor.save_pretrained(pytorch_dump_folder )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
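# Added usage sketch: a hypothetical CLI invocation of the converter above. The
# script name and checkpoint path are placeholders; the weights come from the
# model-zoo link in the comment above.
# python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-13-384x384-IN-1k.pth \
#     --pytorch_dump_folder_path ./cvt-13-384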
| 211 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    model_input_names : List[str] = ['''input_values''', '''padding_mask''']
    def __init__( self , feature_size = 1 , sampling_rate = 24_000 , padding_value = 0.0 , chunk_length_s = None , overlap = None , **kwargs , ):
        """simple docstring"""
        super().__init__(feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs )
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
@property
    def chunk_length( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
    def chunk_stride( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    def __call__( self , raw_audio , padding = None , truncation = False , max_length = None , return_tensors = None , sampling_rate = None , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
            padding = True
        is_batched = bool(
            isinstance(raw_audio , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio ).T]
# verify inputs are valid
        for idx, example in enumerate(raw_audio ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
        padded_inputs = None
        input_values = BatchFeature({"""input_values""": raw_audio} )
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio )
                nb_step = int(np.floor(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio )
                nb_step = int(np.ceil(max_length / self.chunk_stride ) )
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = """max_length"""
            else:
                padded_inputs = input_values
# normal padding on batch
if padded_inputs is None:
            padded_inputs = self.pad(
                input_values , max_length=max_length , truncation=truncation , padding=padding , return_attention_mask=padding , )
            if padding:
                padded_inputs["""padding_mask"""] = padded_inputs.pop("""attention_mask""" )
            input_values = []
            for example in padded_inputs.pop("""input_values""" ):
                if self.feature_size == 1:
                    example = example[..., None]
                input_values.append(example.T )
            padded_inputs["""input_values"""] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
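# Added worked example of the chunk_length / chunk_stride arithmetic above:
# 1 s chunks at 24 kHz with 50% overlap give a 24_000-sample window and a
# 12_000-sample hop.
if __name__ == "__main__":
    chunk_length_s, sampling_rate, overlap = 1.0, 24_000, 0.5
    chunk_length = int(chunk_length_s * sampling_rate)
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))
    assert (chunk_length, chunk_stride) == (24_000, 12_000)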
| 5 | 0 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector : Union[str, Any] = [8, 5, 9, 7]
test_allocated_res_table : Union[str, Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table : Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __snake_case :
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        self.__claim_vector : List[str] = claim_vector
        self.__allocated_resources_table : Tuple = allocated_resources_table
        self.__maximum_claim_table : int = maximum_claim_table
    def __processes_resource_summation( self : str):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table)
for i in range(len(self.__allocated_resources_table[0]))
]
    def __available_resources( self : Tuple):
return np.array(self.__claim_vector) - np.array(
self.__processes_resource_summation())
    def __need( self : str):
return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
for i, allocated_resource in enumerate(self.__allocated_resources_table)
]
    def __need_index_manager( self : List[Any]):
        return {self.__need().index(i): i for i in self.__need()}
    def main( self : Any , **kwargs : Tuple):
        need_list : List[Any] = self.__need()
        alloc_resources_table : str = self.__allocated_resources_table
        available_resources : List[Any] = self.__available_resources()
        need_index_manager : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 5_0 + '''\n''')
while need_list:
            safe : Tuple = False
for each_need in need_list:
                execution : List[str] = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution : List[Any] = False
                        break
                if execution:
                    safe : Dict = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number : str = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources : Optional[int] = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x) for x in available_resources]))
break
if safe:
print('''The process is in a safe state.\n''')
else:
print('''System in unsafe state. Aborting...\n''')
break
    def __pretty_data( self : List[str]):
print(''' ''' * 9 + '''Allocated Resource Table''')
for item in self.__allocated_resources_table:
print(
F"""P{self.__allocated_resources_table.index(_lowercase) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item)
+ '''\n''')
print(''' ''' * 9 + '''System Resource Table''')
for item in self.__maximum_claim_table:
print(
F"""P{self.__maximum_claim_table.index(_lowercase) + 1}"""
+ ''' '''.join(F"""{it:>8}""" for it in item)
+ '''\n''')
print(
'''Current Usage by Active Processes: '''
            + ''' '''.join(str(x) for x in self.__claim_vector))
print(
'''Initial Available Resources: '''
            + ''' '''.join(str(x) for x in self.__available_resources()))
time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
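    # Added usage sketch: run the Banker's algorithm above on the module-level
    # test tables; describe=True prints the tables before the safety check.
    __snake_case(test_claim_vector, test_allocated_res_table, test_maximum_claim_table).main(
        describe=True
    )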
| 171 |
'''simple docstring'''
_lowercase = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowercase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowercase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( nums: list[int] ) -> int:
    """simple docstring"""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including , max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding ),
        )
    return max(max_including, max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
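    # Added checks of the non-adjacent-sum recurrence above: for [2, 7, 9, 3, 1]
    # the best pick is 2 + 9 + 1 = 12.
    assert _lowerCamelCase([2, 7, 9, 3, 1]) == 12
    assert _lowerCamelCase([]) == 0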
| 104 |
'''simple docstring'''
import functools
def mincost_tickets(days :list[int] , costs :list[int] ):
    # Validation
    if not isinstance(days , list ) or not all(isinstance(day , int ) for day in days ):
        raise ValueError("""The parameter days should be a list of integers""" )
    if len(costs ) != 3 or not all(isinstance(cost , int ) for cost in costs ):
        raise ValueError("""The parameter costs should be a list of three integers""" )
    if len(days ) == 0:
        return 0
    if min(days ) <= 0:
        raise ValueError("""All days elements should be greater than 0""" )
    if max(days ) >= 366:
        raise ValueError("""All days elements should be less than 366""" )
    days_set = set(days )
    @functools.cache
    def dynamic_programming(index :int ) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1 )
        return min(
            costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
    return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
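    # Added worked example (LeetCode 983 numbers): days [1, 4, 6, 7, 8, 20] with
    # costs [2, 7, 15] cost 11 — a 1-day pass on day 1, a 7-day pass covering
    # days 4-8, and a 1-day pass on day 20.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11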
| 5 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'funnel-transformer/{name}': 512 for name in _model_names}
__lowercase = {f'funnel-transformer/{name}': {"""do_lower_case""": True} for name in _model_names}
class _lowercase ( _SCREAMING_SNAKE_CASE ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id : int = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
            or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
            or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
            normalizer_state['''lowercase'''] = do_lower_case
            normalizer_state['''strip_accents'''] = strip_accents
            normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
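if __name__ == "__main__":
    # Added usage sketch; "funnel-transformer/small" is one of the checkpoints
    # listed above. Token type ids start with 2 for <cls> (cls_token_type_id).
    tok = _lowercase.from_pretrained("funnel-transformer/small")
    enc = tok("Hello world")
    print(enc["input_ids"], enc["token_type_ids"])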
| 203 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    backbone_config = ConvNextConfig(
        depths=depths , hidden_sizes=hidden_sizes , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , idalabel=idalabel , labelaid=labelaid , )
    return config
def create_rename_keys(config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        """upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
        """upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
        """upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
        """upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
        """upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""state_dict"""]
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    if model_name == "upernet-convnext-tiny":
        expected_slice = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
    elif model_name == "upernet-convnext-small":
        expected_slice = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
    elif model_name == "upernet-convnext-base":
        expected_slice = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
    elif model_name == "upernet-convnext-large":
        expected_slice = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
    elif model_name == "upernet-convnext-xlarge":
        expected_slice = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f'Saving processor to {pytorch_dump_folder_path}' )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f'Pushing model and processor for {model_name} to hub' )
        model.push_to_hub(f'openmmlab/{model_name}' )
        processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F"""upernet-convnext-{size}""" for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 5 | 0 |
def solution( n = 100 ):
    '''simple docstring'''
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
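    # Added worked example: for n = 10 the sum of squares is 385 and the square
    # of the sum is 55**2 = 3025, so the difference is 2640.
    assert solution(10) == 2640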
| 631 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number , dice_number ):
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(face_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution():
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | 0 |
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__ ), 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:1_0]
if __name__ == "__main__":
print(solution())
| 101 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""CPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(1 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""GPU""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.align_to(_lowercase , _lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
_lowerCAmelCase = [mem.copy() for i in range(6 )]
_lowerCAmelCase = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
_lowerCAmelCase = Text("""Model""" , font_size=24 )
_lowerCAmelCase = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) , )
_lowerCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
_lowerCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=2.5 ) , Write(_lowercase ) , Write(_lowercase ) )
self.add(_lowercase )
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = []
for i, rect in enumerate(_lowercase ):
_lowerCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
_lowerCAmelCase = 0.46 / 4
_lowerCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=_lowercase , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=_lowercase , buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 5 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase =logging.get_logger(__name__)
class __magic_name__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE):
_UpperCAmelCase : Optional[Any] = '''maskformer-swin'''
_UpperCAmelCase : Union[str, Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
    def __init__( self ,image_size=2_2_4 ,patch_size=4 ,num_channels=3 ,embed_dim=9_6 ,depths=[2, 2, 6, 2] ,num_heads=[3, 6, 1_2, 2_4] ,window_size=7 ,mlp_ratio=4.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,out_features=None ,out_indices=None ,**kwargs ,):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1 ,len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features ,out_indices=out_indices ,stage_names=self.stage_names )
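# Added instantiation sketch of the config above (assumes transformers is
# installed); hidden_size is derived from embed_dim and the number of stages.
if __name__ == "__main__":
    config = __magic_name__()
    assert config.hidden_size == 96 * 2 ** 3  # 768 after the last of four stages
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]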
| 333 |
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
_lowercase = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , prompt = None , choices = [] ):
        """simple docstring"""
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = """*"""
        else:
            self.arrow_char = """➔ """
def _lowercase ( self , _lowercase , _lowercase = "" ):
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _lowercase )
else:
forceWrite(self.choices[index] , _lowercase )
    def print_choice( self , index ):
        """simple docstring"""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ' )
            self.write_choice(index )
else:
forceWrite(F' {self.choices[index]}' )
reset_cursor()
    def move_direction( self , direction , num_spaces = 1 ):
        """simple docstring"""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices ):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position )
        move_cursor(num_spaces , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["""up"""] )
    def move_up( self ):
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
    def move_down( self ):
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
    def select( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
    def interrupt( self ):
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number )] for number in range(10 )] )
    def select_row( self ):
        """simple docstring"""
        index = int(chr(self.current_selection ) )
        movement = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
                self.move_direction(Direction.DOWN , movement )
else:
return
else:
return
    def run( self , default_choice = 0 ):
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt , """\n""" )
if in_colab:
forceWrite("""Please input a choice index (starting from 0), and press enter""" , """\n""" )
else:
forceWrite("""Please select a choice using the arrow or number keys, and selecting with enter""" , """\n""" )
        self.position = default_choice
for i in range(len(self.choices ) ):
            self.print_choice(i )
forceWrite("""\n""" )
move_cursor(len(self.choices ) - self.position , """UP""" )
with cursor.hide():
while True:
if in_colab:
try:
                        choice = int(builtins.input() )
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , """UP""" )
clear_line()
self.write_choice(_lowercase , """\n""" )
return choice
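# Added interactive usage sketch (requires a TTY). run() blocks until the user
# confirms a choice with enter and returns the selected index.
if __name__ == "__main__":
    menu = UpperCAmelCase_("Pick a backend:", ["cpu", "cuda", "mps"])
    print(F"You picked option {menu.run(default_choice=0)}.")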
| 5 | 0 |
def A ( principal , rate_per_annum , years_to_repay ):
    '''simple docstring'''
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
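    # Added worked example: 25_000 borrowed at 12% per annum over 3 years gives a
    # monthly installment of about 830.36.
    assert round(A(25_000, 0.12, 3), 2) == 830.36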
| 500 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename ):
    result = {}
    with open(filename , """r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                value = line_number
                key = words[0]
                result[key] = value
return result
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split(""".""" ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split(""".""" ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
_lowercase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
            _lowerCAmelCase = config.feat_extract_norm == """layer"""
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
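# --- usage sketch (illustrative; the script and checkpoint paths below are
# hypothetical examples, only the flags defined above are used) -----------------
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned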
| 5 | 0 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''Speech2TextFeatureExtractor'''
SCREAMING_SNAKE_CASE__ : Any = '''Speech2TextTokenizer'''
def __init__( self : int , snake_case : List[Any] , snake_case : int ):
"""simple docstring"""
super().__init__(_lowercase , _lowercase )
_snake_case : Tuple = self.feature_extractor
_snake_case : int = False
def __call__( self : List[Any] , *snake_case : List[Any] , **snake_case : List[Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*_lowercase , **_lowercase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_snake_case : int = kwargs.pop('raw_speech' )
else:
_snake_case : Union[str, Any] = kwargs.pop('audio' , _lowercase )
_snake_case : int = kwargs.pop('sampling_rate' , _lowercase )
_snake_case : Dict = kwargs.pop('text' , _lowercase )
if len(_lowercase ) > 0:
_snake_case : Tuple = args[0]
_snake_case : Union[str, Any] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_snake_case : str = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
if text is not None:
_snake_case : List[str] = self.tokenizer(_lowercase , **_lowercase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
            inputs['labels'] = encodings['input_ids']  # attach the tokenized transcript as labels, as the upstream processor does
return inputs
def __UpperCAmelCase ( self : Any , *snake_case : Union[str, Any] , **snake_case : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def __UpperCAmelCase ( self : List[Any] , *snake_case : Dict , **snake_case : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*_lowercase , **_lowercase )
@contextmanager
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
_snake_case : int = True
_snake_case : Tuple = self.tokenizer
yield
_snake_case : Optional[Any] = self.feature_extractor
_snake_case : Optional[int] = False
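# --- usage sketch (illustrative; upstream the class above is
# Speech2TextProcessor, and the checkpoint name is just an example) -------------
#
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16000, text="a transcript")
#   # feature-extractor tensors plus the tokenized transcript attached as labels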
| 517 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[str] = '''decision_transformer'''
_lowercase : Optional[Any] = ['''past_key_values''']
_lowercase : str = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowercase=17 , _lowercase=4 , _lowercase=128 , _lowercase=4_096 , _lowercase=True , _lowercase=1 , _lowercase=1_024 , _lowercase=3 , _lowercase=1 , _lowercase=None , _lowercase="relu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=1e-5 , _lowercase=0.02 , _lowercase=True , _lowercase=True , _lowercase=50_256 , _lowercase=50_256 , _lowercase=False , _lowercase=False , **_lowercase , ):
"""simple docstring"""
_lowerCAmelCase = state_dim
_lowerCAmelCase = act_dim
_lowerCAmelCase = hidden_size
_lowerCAmelCase = max_ep_len
_lowerCAmelCase = action_tanh
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
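# --- usage sketch (illustrative; upstream the class above is
# DecisionTransformerConfig) -----------------------------------------------------
#
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3)   # e.g. Hopper
#   config.hidden_size, config.n_layer, config.n_head             # -> 128, 3, 1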
| 5 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : int = {
'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
A_ : Optional[Any] = 111_4112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
A_ : str = 0
A_ : Union[str, Any] = 0XE000
A_ : Union[str, Any] = 0XE001
A_ : int = 0XE002
A_ : List[Any] = 0XE003
A_ : List[Any] = 0XE004
# Maps special codepoints to human-readable names.
A_ : str = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: '[CLS]',
SEP: '[SEP]',
BOS: '[BOS]',
MASK: '[MASK]',
PAD: '[PAD]',
RESERVED: '[RESERVED]',
}
# Maps special codepoint human-readable names to their codepoint values.
A_ : Dict = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _a (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , A__=chr(_lowercase ) , A__=chr(_lowercase ) , A__=chr(_lowercase ) , A__=chr(_lowercase ) , A__=chr(_lowercase ) , A__=chr(_lowercase ) , A__=False , A__=2048 , **A__ , ):
A__ : Tuple = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
A__ : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
A__ : Optional[int] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
A__ : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
A__ : Optional[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A__ : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , model_max_length=_lowercase , **_lowercase , )
# Creates a mapping for looking up the IDs of special symbols.
A__ : Dict = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
A__ : Optional[Any] = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
A__ : List[Any] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
A__ : Optional[Any] = UNICODE_VOCAB_SIZE
A__ : List[Any] = len(self._special_codepoints )
@property
def __A ( self ):
return self._unicode_vocab_size
def __A ( self , A__ ):
return list(_lowercase )
def __A ( self , A__ ):
try:
return ord(_lowercase )
except TypeError:
raise ValueError(F"""invalid token: \'{token}\'""" )
def __A ( self , A__ ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_lowercase )
except TypeError:
raise ValueError(F"""invalid id: {index}""" )
def __A ( self , A__ ):
return "".join(_lowercase )
def __A ( self , A__ , A__ = None ):
A__ : int = [self.sep_token_id]
A__ : str = [self.cls_token_id]
A__ : List[Any] = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __A ( self , A__ , A__ = None , A__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
A__ : Union[str, Any] = [1] + ([0] * len(_lowercase )) + [1]
if token_ids_a is not None:
result += ([0] * len(_lowercase )) + [1]
return result
def __A ( self , A__ , A__ = None ):
A__ : List[str] = [self.sep_token_id]
A__ : List[Any] = [self.cls_token_id]
A__ : int = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __A ( self , A__ , A__ = None ):
return ()
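# --- usage sketch (illustrative; upstream the class above is CanineTokenizer) --
# Tokenization is plain Unicode: tokens are characters, ids are their codepoints
# (ord), and the special symbols sit in the private-use area, so
#
#   tok = CanineTokenizer()
#   tok.build_inputs_with_special_tokens([ord(c) for c in "hi"])
#   # -> [57344, 104, 105, 57345]   i.e. [CLS]=0xE000, 'h', 'i', [SEP]=0xE001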
| 456 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
_lowercase = None
_lowercase = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
_lowercase = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def A (__lowerCamelCase :int , __lowerCamelCase :Optional[Any]=1 , __lowerCamelCase :List[Any]=256 ):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
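# Worked example of the rounding above: for dim = 4096 with multiplier 1 and
# multiple_of = 256, int(8 * 4096 / 3) = 10922, and rounding up to the next
# multiple of 256 gives 11008 -- the 7B entry in the size table defined above.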
def A (__lowerCamelCase :Any ):
with open(__lowerCamelCase , """r""" ) as f:
return json.load(__lowerCamelCase )
def A (__lowerCamelCase :List[Any] , __lowerCamelCase :int ):
with open(__lowerCamelCase , """w""" ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase )
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] , __lowerCamelCase :Tuple=True ):
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """tmp""" )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = read_json(os.path.join(__lowerCamelCase , """params.json""" ) )
_lowerCAmelCase = NUM_SHARDS[model_size]
_lowerCAmelCase = params["""n_layers"""]
_lowerCAmelCase = params["""n_heads"""]
_lowerCAmelCase = n_heads // num_shards
_lowerCAmelCase = params["""dim"""]
_lowerCAmelCase = dim // n_heads
_lowerCAmelCase = 10_000.0
_lowerCAmelCase = 1.0 / (base ** (torch.arange(0 , __lowerCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowerCAmelCase = params["""n_kv_heads"""] # for GQA / MQA
_lowerCAmelCase = n_heads_per_shard // num_key_value_heads
_lowerCAmelCase = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowerCAmelCase = n_heads
_lowerCAmelCase = n_heads_per_shard
_lowerCAmelCase = dim
# permute for sliced rotary
def permute(__lowerCamelCase :Optional[int] , __lowerCamelCase :str=n_heads , __lowerCamelCase :str=dim , __lowerCamelCase :List[Any]=dim ):
return w.view(__lowerCamelCase , dima // n_heads // 2 , 2 , __lowerCamelCase ).transpose(1 , 2 ).reshape(__lowerCamelCase , __lowerCamelCase )
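    # Hedged note on the permute above: fairseq stores the rotary q/k rows in
    # interleaved pair order, while the HF LLaMA attention expects the two
    # rotary halves as contiguous blocks; the view/transpose/reshape only
    # reorders rows, it does not change any values.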
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowerCAmelCase = torch.load(os.path.join(__lowerCamelCase , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
_lowerCAmelCase = [
torch.load(os.path.join(__lowerCamelCase , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
for i in range(__lowerCamelCase )
]
_lowerCAmelCase = 0
_lowerCAmelCase = {"""weight_map""": {}}
for layer_i in range(__lowerCamelCase ):
_lowerCAmelCase = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowerCAmelCase = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_lowerCAmelCase = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(__lowerCamelCase )
] , dim=0 , ).reshape(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(__lowerCamelCase )] , dim=1 )
_lowerCAmelCase = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(__lowerCamelCase )] , dim=0 )
_lowerCAmelCase = inv_freq
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
_lowerCAmelCase = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_lowerCAmelCase = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
_lowerCAmelCase = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_lowerCAmelCase = filename
param_count += v.numel()
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
_lowerCAmelCase = LlamaForCausalLM.from_pretrained(__lowerCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=__lowerCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__lowerCamelCase , safe_serialization=__lowerCamelCase )
shutil.rmtree(__lowerCamelCase )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Union[str, Any] ):
# Initialize the tokenizer based on the `spm` model
_lowerCAmelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
_lowerCAmelCase = tokenizer_class(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
def A ():
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowerCAmelCase = os.path.join(args.input_dir , """tokenizer.model""" )
write_tokenizer(args.output_dir , __lowerCamelCase )
if __name__ == "__main__":
main()
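# --- usage sketch (illustrative; the directory names are hypothetical) ----------
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir ./LLaMA --model_size 7B --output_dir ./llama-7b-hf
#
# For 7B the script loads a single consolidated.00.pth; larger sizes stitch
# NUM_SHARDS[model_size] shard files along the head/FFN dimensions as coded above.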
| 5 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
__lowerCamelCase = logging.get_logger(__name__)
class _snake_case ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] , *snake_case : str , **snake_case : Any ):
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 608 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : Tuple = (DDPMScheduler,)
def _lowercase ( self , **_lowercase ):
"""simple docstring"""
_lowerCAmelCase = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowercase )
return config
def _lowercase ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def _lowercase ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
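        # The values asserted above follow the DDPM posterior variance
        #     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
        # which is ~0 at t = 0 and approaches beta_end = 0.02 at t = 999 for the
        # linear schedule configured above (beta_487 ~= 0.0098).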
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = len(_lowercase )
_lowerCAmelCase = self.dummy_model()
_lowerCAmelCase = self.dummy_sample_deter
_lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowercase ) ):
# 1. predict noise residual
_lowerCAmelCase = model(_lowercase , _lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCAmelCase = scheduler.step(_lowercase , _lowercase , _lowercase , generator=_lowercase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCAmelCase = pred_prev_sample
_lowerCAmelCase = torch.sum(torch.abs(_lowercase ) )
_lowerCAmelCase = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowercase )
_lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowercase ):
if i == len(_lowercase ) - 1:
_lowerCAmelCase = -1
else:
_lowerCAmelCase = timesteps[i + 1]
_lowerCAmelCase = scheduler.previous_timestep(_lowercase )
_lowerCAmelCase = prev_t.item()
self.assertEqual(_lowercase , _lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowercase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [100, 87, 50, 1, 0]
_lowerCAmelCase = len(_lowercase )
with self.assertRaises(_lowercase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowercase , timesteps=_lowercase )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.scheduler_classes[0]
_lowerCAmelCase = self.get_scheduler_config()
_lowerCAmelCase = scheduler_class(**_lowercase )
_lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowercase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowercase )
| 5 | 0 |
'''simple docstring'''
def __A ( _SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = (1 + 2_4 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
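# Worked example: P_5 = 5 * (3*5 - 1) / 2 = 35, so 1 + 24*35 = 841, root = 29,
# and (1 + 29) / 6 = 5 is an integer -> pentagonal. For 36 the root is
# irrational, the quotient is not an integer, and the check fails.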
def __A ( _SCREAMING_SNAKE_CASE : int = 5_0_0_0 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , __lowerCamelCase )]
for i, pentagonal_i in enumerate(__lowerCamelCase ):
for j in range(__lowerCamelCase , len(__lowerCamelCase ) ):
__SCREAMING_SNAKE_CASE : Optional[Any] = pentagonal_nums[j]
__SCREAMING_SNAKE_CASE : int = pentagonal_i + pentagonal_j
__SCREAMING_SNAKE_CASE : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(__lowerCamelCase ) and is_pentagonal(__lowerCamelCase ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 211 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
_lowercase = logging.get_logger(__name__)
_lowercase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
'''simple docstring'''
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(_SCREAMING_SNAKE_CASE )} )
_lowercase : str = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
_lowercase : int = field(
default=1_2_8 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
_lowercase : int = field(
default=1_2_8 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
_lowercase : int = field(
default=6_4 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
_lowercase : int = field(
default=3_0 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
_lowercase : bool = field(
default=_SCREAMING_SNAKE_CASE , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
_lowercase : float = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=2_0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
_lowercase : int = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
_lowercase : int = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''train'''
_lowercase : Union[str, Any] = '''dev'''
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : SquadDataTrainingArguments
_lowercase : List[SquadFeatures]
_lowercase : Split
_lowercase : bool
def __init__( self , _lowercase , _lowercase , _lowercase = None , _lowercase = Split.train , _lowercase = False , _lowercase = None , _lowercase = "pt" , ):
"""simple docstring"""
_lowerCAmelCase = args
_lowerCAmelCase = is_language_sensitive
        _lowerCAmelCase = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()  # upstream: SquadV2Processor vs. SquadV1Processor
if isinstance(_lowercase , _lowercase ):
try:
_lowerCAmelCase = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
_lowerCAmelCase = mode
# Load data features from cache or dataset file
_lowerCAmelCase = """v2""" if args.version_2_with_negative else """v1"""
_lowerCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase = cached_features_file + """.lock"""
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
_lowerCAmelCase = time.time()
_lowerCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
_lowerCAmelCase = self.old_features["""features"""]
_lowerCAmelCase = self.old_features.get("""dataset""" , _lowercase )
_lowerCAmelCase = self.old_features.get("""examples""" , _lowercase )
logger.info(
F'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
""" future run""" )
else:
if mode == Split.dev:
_lowerCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
_lowerCAmelCase = self.processor.get_train_examples(args.data_dir )
_lowerCAmelCase , _lowerCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
_lowerCAmelCase = time.time()
torch.save(
{"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , _lowercase ):
"""simple docstring"""
_lowerCAmelCase = self.features[i]
_lowerCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
_lowerCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
_lowerCAmelCase = {
"""input_ids""": input_ids,
"""attention_mask""": attention_mask,
"""token_type_ids""": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"""is_impossible""": is_impossible} )
if self.is_language_sensitive:
inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
_lowerCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
_lowerCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
return inputs
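# --- usage sketch (illustrative; upstream this Dataset wrapper is SquadDataset) --
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_set = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   train_set[0]["input_ids"]   # torch.LongTensor of length max_seq_length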
| 5 | 0 |
from manim import *
class __snake_case ( _SCREAMING_SNAKE_CASE ):
def UpperCAmelCase__ ( self : List[str]):
lowerCAmelCase_ : List[Any] = Rectangle(height=0.5 , width=0.5)
lowerCAmelCase_ : Optional[int] = Rectangle(height=0.46 , width=0.46).set_stroke(width=0)
lowerCAmelCase_ : List[Any] = [mem.copy() for i in range(6)]
lowerCAmelCase_ : Dict = [mem.copy() for i in range(6)]
lowerCAmelCase_ : int = VGroup(*_lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : Optional[int] = VGroup(*_lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : Optional[int] = VGroup(_lowercase , _lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : Tuple = Text('''CPU''' , font_size=2_4)
lowerCAmelCase_ : Any = Group(_lowercase , _lowercase).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase)
cpu.move_to([-2.5, -0.5, 0])
self.add(_lowercase)
lowerCAmelCase_ : Optional[int] = [mem.copy() for i in range(4)]
lowerCAmelCase_ : Any = VGroup(*_lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : str = Text('''GPU''' , font_size=2_4)
lowerCAmelCase_ : Tuple = Group(_lowercase , _lowercase).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase)
gpu.move_to([-1, -1, 0])
self.add(_lowercase)
lowerCAmelCase_ : List[Any] = [mem.copy() for i in range(6)]
lowerCAmelCase_ : str = VGroup(*_lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : Dict = Text('''Model''' , font_size=2_4)
lowerCAmelCase_ : Dict = Group(_lowercase , _lowercase).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase)
model.move_to([3, -1.0, 0])
self.add(_lowercase)
lowerCAmelCase_ : Dict = []
for i, rect in enumerate(_lowercase):
rect.set_stroke(_lowercase)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
lowerCAmelCase_ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3).set_stroke(width=0.0).set_fill(_lowercase , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.02 , direction=_lowercase)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowercase , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowercase , buff=0.0)
self.add(_lowercase)
cpu_targs.append(_lowercase)
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6)]
lowerCAmelCase_ : Optional[int] = VGroup(*_lowercase).arrange(_lowercase , buff=0)
lowerCAmelCase_ : List[Any] = Text('''Loaded Checkpoint''' , font_size=2_4)
lowerCAmelCase_ : Dict = Group(_lowercase , _lowercase).arrange(_lowercase , aligned_edge=_lowercase , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
lowerCAmelCase_ : Tuple = Square(side_length=2.2)
key.move_to([-5, 2, 0])
lowerCAmelCase_ : str = MarkupText(
F"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0])
self.add(_lowercase , _lowercase)
lowerCAmelCase_ : Union[str, Any] = MarkupText(
F"""<span fgcolor=\'{BLUE}\'>●</span> Checkpoint""" , font_size=1_8 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left())
lowerCAmelCase_ : Any = MarkupText(
F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=2_4 , )
step_a.move_to([2, 2, 0])
self.play(Write(_lowercase) , Write(_lowercase))
self.play(Write(_lowercase , run_time=1) , Create(_lowercase , run_time=1))
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : List[Any] = []
for i, rect in enumerate(_lowercase):
lowerCAmelCase_ : Tuple = fill.copy().set_fill(_lowercase , opacity=0.7)
target.move_to(_lowercase)
first_animations.append(GrowFromCenter(_lowercase , run_time=1))
lowerCAmelCase_ : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(_lowercase , run_time=1.5))
self.play(*_lowercase)
self.play(*_lowercase)
self.wait()
| 171 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : str = '''dpr'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=0 , _lowercase="absolute" , _lowercase = 0 , **_lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=_lowercase , **_lowercase )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
| 5 | 0 |
"""simple docstring"""
import cva
import numpy as np
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
if k in (0.0_4, 0.0_6):
A__ = k
A__ = window_size
else:
raise ValueError("invalid k value" )
def __str__( self ) -> Tuple:
return str(self.k )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ ) -> str:
A__ = cva.imread(_lowercase , 0 )
A__ , A__ = img.shape
A__ = []
A__ = img.copy()
A__ = cva.cvtColor(_lowercase , cva.COLOR_GRAY2RGB )
A__ , A__ = np.gradient(_lowercase )
A__ = dx**2
A__ = dy**2
A__ = dx * dy
        A__ = self.k  # use the k validated in the constructor instead of re-hardcoding 0.04
A__ = self.window_size // 2
for y in range(_lowercase , h - offset ):
for x in range(_lowercase , w - offset ):
A__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
A__ = (wxx * wyy) - (wxy**2)
A__ = wxx + wyy
A__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
UpperCamelCase = HarrisCorner(0.04, 3)
UpperCamelCase , UpperCamelCase = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img)
| 104 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_lowercase = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_lowercase = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
def _lowercase ( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
def _lowercase ( self , _lowercase , _lowercase , _lowercase=None , _lowercase="uniform_average" , _lowercase=True ):
"""simple docstring"""
_lowerCAmelCase = mean_squared_error(
_lowercase , _lowercase , sample_weight=_lowercase , multioutput=_lowercase , squared=_lowercase )
return {"mse": mse}
| 5 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=_SCREAMING_SNAKE_CASE )
class _lowercase ( _SCREAMING_SNAKE_CASE ):
_lowercase : str = field(default='summarization',metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
_lowercase : ClassVar[Features] = Features({'summary': Value('string' )} )
_lowercase : str = "text"
_lowercase : str = "summary"
@property
def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
return {self.text_column: "text", self.summary_column: "summary"}
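# Hedged sketch (upstream the class above is datasets' Summarization template):
#
#   Summarization().column_mapping
#   # -> {"text": "text", "summary": "summary"}
#   Summarization(text_column="article", summary_column="highlights")
#   # remaps CNN/DailyMail-style column names onto the schema above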
| 203 |
'''simple docstring'''
def A ():
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def A (__lowerCamelCase :List[Any] ):
_lowerCAmelCase = 1
_lowerCAmelCase = 2
while i * i <= n:
_lowerCAmelCase = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
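# Worked example: 28 = 2**2 * 7. The loop strips the factor 2**2 (multiplicity
# 2, contributing 2+1 = 3); 7 survives because 3*3 > 7, and the trailing
# `n > 1` branch doubles the count for that leftover prime, giving 3 * 2 = 6
# divisors: 1, 2, 4, 7, 14, 28.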
def A ():
return next(i for i in triangle_number_generator() if count_divisors(__lowerCamelCase ) > 500 )
if __name__ == "__main__":
print(solution())
| 5 | 0 |
def lowercase ( a , a , a , a , a , a ):
'''simple docstring'''
if index == r:
for j in range(__lowerCamelCase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
SCREAMING_SNAKE_CASE_ :Optional[Any] = arr[i]
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 , __lowerCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowercase ( a , a , a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 0 , __lowerCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
SCREAMING_SNAKE_CASE__ = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
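# Trace for a smaller input: print_combination([10, 20, 30], 3, 2) prints
#   10 20
#   10 30
#   20 30
# because at every step the current element is first included (index advances)
# and then excluded (index stays) before recursing on i + 1.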
| 631 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowercase = logging.get_logger(__name__)
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , *_lowercase , **_lowercase ):
"""simple docstring"""
warnings.warn(
"""The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DonutImageProcessor instead.""" , _lowercase , )
super().__init__(*_lowercase , **_lowercase )
| 5 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase__ : Union[str, Any] =logging.getLogger()
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : str = {}
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(__lowerCamelCase, 'all_results.json' )
if os.path.exists(__lowerCamelCase ):
with open(__lowerCamelCase, 'r' ) as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(__lowerCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
lowerCAmelCase__ : Any =logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue_with_xla_spawn(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = "\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n ".split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 101 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 5 | 0 |
from manim import *
class Stage1(Scene):
    def construct(self):
UpperCAmelCase = Rectangle(height=0.5 ,width=0.5 )
UpperCAmelCase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*_lowercase ).arrange(_lowercase ,buff=0 )
UpperCAmelCase = VGroup(*_lowercase ).arrange(_lowercase ,buff=0 )
UpperCAmelCase = VGroup(_lowercase ,_lowercase ).arrange(_lowercase ,buff=0 )
UpperCAmelCase = Text("CPU" ,font_size=2_4 )
UpperCAmelCase = Group(_lowercase ,_lowercase ).arrange(_lowercase ,buff=0.5 ,aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
UpperCAmelCase = [mem.copy() for i in range(1 )]
UpperCAmelCase = VGroup(*_lowercase ).arrange(_lowercase ,buff=0 )
UpperCAmelCase = Text("GPU" ,font_size=2_4 )
UpperCAmelCase = Group(_lowercase ,_lowercase ).arrange(_lowercase ,buff=0.5 ,aligned_edge=_lowercase )
gpu.align_to(_lowercase ,_lowercase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowercase )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*_lowercase ).arrange(_lowercase ,buff=0 )
UpperCAmelCase = Text("Model" ,font_size=2_4 )
UpperCAmelCase = Group(_lowercase ,_lowercase ).arrange(_lowercase ,buff=0.5 ,aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowercase ,run_time=1 ) ,Create(_lowercase ,run_time=1 ) ,Create(_lowercase ,run_time=1 ) ,)
UpperCAmelCase = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=2_4 ,)
UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=1_8 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase ,run_time=2.5 ) ,Write(_lowercase ) ,Write(_lowercase ) )
self.add(_lowercase )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for i, rect in enumerate(_lowercase ):
UpperCAmelCase = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowercase ,opacity=0.7 )
cpu_target.move_to(_lowercase )
cpu_target.generate_target()
UpperCAmelCase = 0.46 / 4
UpperCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowercase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowercase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowercase ,buff=0.0 )
cpu_targs.append(_lowercase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowercase ) )
second_animations.append(MoveToTarget(_lowercase ,run_time=1.5 ) )
self.play(*_lowercase )
self.play(*_lowercase )
self.wait()
| 333 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"User-Agent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 5 | 0 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 500 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowercase = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 5 | 0 |
'''simple docstring'''
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor: logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
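# Illustrative usage through the high-level factory (the model choice is an
# arbitrary example):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test")
#   # nested list of shape [1, sequence_length, hidden_size]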
| 517 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 5 | 0 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : Union[str, Any] = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs):
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size_divisor=None,
        resample=None,
        do_rescale=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
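# Minimal usage sketch (illustrative; the input size is an arbitrary example):
#
#   import numpy as np
#   processor = GLPNImageProcessor()
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = processor(image, return_tensors="np")
#   # batch["pixel_values"][0] has height/width rounded down to multiples of 32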
| 456 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 5 | 0 |
"""simple docstring"""
def a ( __snake_case : int, __snake_case : int ):
'''simple docstring'''
while second != 0:
UpperCAmelCase_ :Tuple = first & second
first ^= second
UpperCAmelCase_ :Optional[Any] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase = int(input("Enter the first number: ").strip())
__lowerCamelCase = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
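# Worked trace (illustrative): add(5, 9)
#   first=0b0101, second=0b1001 -> carry=0b0001, first=0b1100, second=0b0010
#   first=0b1100, second=0b0010 -> carry=0b0000, first=0b1110, second=0b0000
#   no carry left, so the result is 0b1110 == 14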
| 608 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
_lowercase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
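# Illustrative usage through the high-level factory (the CLAP checkpoint is
# the one commonly paired with this task, given here as an example):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])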
| 5 | 0 |
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 6 |
import argparse
import datetime


def zeller(date_input: str) -> str:
    """Return the day of the week for a date given as mm-dd-yyyy or mm/dd/yyyy,
    computed with Zeller's congruence and validated against ``datetime``."""
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 6 | 1 |
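# Illustrative check (January 31st 2010 fell on a Sunday):
#
#   >>> zeller("01-31-2010")
#   'Your date 01-31-2010, is a Sunday!'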
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 6 |
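# Illustrative examples (not part of the original file):
#
#   exchange_sort([5, 4, 3, 2, 1])  # -> [1, 2, 3, 4, 5]
#   exchange_sort([-1, 0, 5])       # -> [-1, 0, 5]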
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
_lowerCamelCase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
_lowerCamelCase = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
_lowerCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
_lowerCamelCase = [0] * args.vocab_size
for k, v in counter.items():
_lowerCamelCase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 1 |
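# Reading the dump back (illustrative):
#
#   import pickle
#   with open("data/token_counts.bert-base-uncased.pickle", "rb") as f:
#       counts = pickle.load(f)
#   # counts[token_id] is the corpus frequency of that token id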
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        """Takes an optional integer key used for both encryption and decryption."""
        # private field
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        # precondition
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        # precondition
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful") | 6 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
SCREAMING_SNAKE_CASE__ = dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE__ = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
SCREAMING_SNAKE_CASE__ = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 1_6000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
SCREAMING_SNAKE_CASE__ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , __A )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__A ) + """\n""" )
# load decoder from hub
SCREAMING_SNAKE_CASE__ = """hf-internal-testing/ngram-beam-search-decoder"""
def _snake_case ( self :List[Any] , **__A :Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.add_kwargs_tokens_map.copy()
kwargs.update(__A )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self :int , **__A :Union[str, Any] ) -> Tuple:
"""simple docstring"""
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self :List[str] , **__A :Union[str, Any] ) -> List[str]:
"""simple docstring"""
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A )
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __A )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __A )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _snake_case ( self :Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(__A , """include""" ):
WavaVecaProcessorWithLM(
tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = floats_list((3, 1000) )
SCREAMING_SNAKE_CASE__ = feature_extractor(__A , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor(__A , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = """This is a test string"""
SCREAMING_SNAKE_CASE__ = processor(text=__A )
SCREAMING_SNAKE_CASE__ = tokenizer(__A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self :Optional[Any] , __A :List[Any]=(2, 10, 16) , __A :Any=77 ) -> str:
"""simple docstring"""
np.random.seed(__A )
return np.random.rand(*__A )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits(shape=(10, 16) , seed=13 )
SCREAMING_SNAKE_CASE__ = processor.decode(__A )
SCREAMING_SNAKE_CASE__ = decoder.decode_beams(__A )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _snake_case ( self :List[Any] , __A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__A )
else:
with get_context(__A ).Pool() as pool:
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__A , __A )
SCREAMING_SNAKE_CASE__ = list(__A )
with get_context("""fork""" ).Pool() as p:
SCREAMING_SNAKE_CASE__ = decoder.decode_beams_batch(__A , __A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__A , decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text )
self.assertListEqual(__A , decoded_processor.logit_score )
self.assertListEqual(__A , decoded_processor.lm_score )
def _snake_case ( self :List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ = 15
SCREAMING_SNAKE_CASE__ = -2_0.0
SCREAMING_SNAKE_CASE__ = -4.0
SCREAMING_SNAKE_CASE__ = processor.batch_decode(
__A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
SCREAMING_SNAKE_CASE__ = decoded_processor_out.text
SCREAMING_SNAKE_CASE__ = list(__A )
with get_context("""fork""" ).Pool() as pool:
SCREAMING_SNAKE_CASE__ = decoder.decode_beams_batch(
__A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , )
SCREAMING_SNAKE_CASE__ = [d[0][0] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE__ = [d[0][2] for d in decoded_decoder_out]
SCREAMING_SNAKE_CASE__ = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , __A )
self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , __A , atol=1E-3 ) )
self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , __A , atol=1E-3 ) )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ = 2.0
SCREAMING_SNAKE_CASE__ = 5.0
SCREAMING_SNAKE_CASE__ = -2_0.0
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = processor.batch_decode(
__A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
SCREAMING_SNAKE_CASE__ = decoded_processor_out.text
SCREAMING_SNAKE_CASE__ = list(__A )
decoder.reset_params(
alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , )
with get_context("""fork""" ).Pool() as pool:
SCREAMING_SNAKE_CASE__ = decoder.decode_beams_batch(
__A , __A , )
SCREAMING_SNAKE_CASE__ = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__A , __A )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , __A )
SCREAMING_SNAKE_CASE__ = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , __A )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE__ = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE__ = os.listdir(__A )
SCREAMING_SNAKE_CASE__ = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__A , __A )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = snapshot_download("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained(__A )
SCREAMING_SNAKE_CASE__ = processor.decoder.model_container[processor.decoder._model_key]
SCREAMING_SNAKE_CASE__ = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
SCREAMING_SNAKE_CASE__ = os.listdir(__A )
SCREAMING_SNAKE_CASE__ = os.listdir(__A )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(__A , __A )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = floats_list((3, 1000) )
SCREAMING_SNAKE_CASE__ = processor_wavaveca(__A , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processor_auto(__A , return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ = processor_wavaveca.batch_decode(__A )
SCREAMING_SNAKE_CASE__ = processor_auto.batch_decode(__A )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _snake_case ( self :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_feature_extractor()
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_decoder()
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
@staticmethod
def _snake_case ( __A :Any , __A :Dict ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [d[key] for d in offsets]
return retrieved_list
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()[0]
SCREAMING_SNAKE_CASE__ = processor.decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
SCREAMING_SNAKE_CASE__ = self._get_dummy_logits()
SCREAMING_SNAKE_CASE__ = processor.batch_decode(__A , output_word_offsets=__A )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(__A , __A ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(__A , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _snake_case ( self :int ) -> int:
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE__ = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=__A )
SCREAMING_SNAKE_CASE__ = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_6000 ) )
SCREAMING_SNAKE_CASE__ = iter(__A )
SCREAMING_SNAKE_CASE__ = next(__A )
SCREAMING_SNAKE_CASE__ = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
SCREAMING_SNAKE_CASE__ = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
SCREAMING_SNAKE_CASE__ = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(__A ).logits.cpu().numpy()
SCREAMING_SNAKE_CASE__ = processor.decode(logits[0] , output_word_offsets=__A )
SCREAMING_SNAKE_CASE__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
SCREAMING_SNAKE_CASE__ = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
SCREAMING_SNAKE_CASE__ = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(__A , """word""" ) ) , __A )
self.assertEqual(""" """.join(self.get_from_offsets(__A , """word""" ) ) , output.text )
# output times
SCREAMING_SNAKE_CASE__ = torch.tensor(self.get_from_offsets(__A , """start_time""" ) )
SCREAMING_SNAKE_CASE__ = torch.tensor(self.get_from_offsets(__A , """end_time""" ) )
# fmt: off
SCREAMING_SNAKE_CASE__ = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] )
SCREAMING_SNAKE_CASE__ = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] )
# fmt: on
self.assertTrue(torch.allclose(__A , __A , atol=0.0_1 ) )
self.assertTrue(torch.allclose(__A , __A , atol=0.0_1 ) ) | 6 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 6 | 1 |
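# Illustrative usage against a released checkpoint (checkpoint name is the
# standard OWL-ViT one; `image` is assumed to be a PIL.Image):
#
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
#   # inputs contains input_ids, attention_mask and pixel_values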
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the not-yet-quantized latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer, unless explicitly disabled
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant_conv_out = self.post_quant_conv(quant)
        dec = self.decoder(quant_conv_out, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, then decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
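
# A minimal round-trip sketch for the model above (the input shape is an assumption
# for illustration, not a definitive recipe):
#
#   model = VQModel()
#   images = torch.randn(1, 3, 32, 32)
#   reconstruction = model(images).sample  # encode -> quantize -> decode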
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    """Return the median of the merged contents of the two arrays.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f'The median of two arrays is: {median_of_two_arrays(array_a, array_b)}')
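
# Note: sorting the concatenation costs O((m + n) log(m + n)). A binary-search
# partition over the shorter array would reach O(log(min(m, n))), at the cost of a
# trickier implementation; the simple version above is usually fine in practice.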
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # extra dense head predicting one of 5 Natural Questions answer categories
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
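
# Quick sanity check for the loss above (toy shapes, illustrative only): with all-zero
# logits over 5 classes, per-example cross-entropy is log(5) ~= 1.609, so each of the
# three terms -- and hence their average -- is ~= 1.609.
#
#   logits = jnp.zeros((2, 5)); labels = jnp.array([1, 3])
#   calculate_loss_for_nq(logits, labels, logits, labels, logits, labels)  # ~1.609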
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
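
# Note: because `train_step` is wrapped in `jax.pmap(axis_name="batch")`, the train
# state and model inputs must carry a leading device axis (see `jax_utils.replicate`
# and the `shard` call in the data collator above); `jax.lax.pmean` then averages the
# loss and gradients across devices on every step.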
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq)
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f'Running EPOCH-{epoch}'):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}', state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f'SAVING CHECKPOINT IN {save_dir}', end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f'RESTORING CHECKPOINT FROM {save_dir}', end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales (keyed on the flattened path)
        mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
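
# Shape of the schedule produced above (a sketch using the Args defaults; the values
# are what the two linear pieces imply, not measured output):
#
#   tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20_000, num_train_steps=100_000, weight_decay=0.0095)
#   lr(0)        # 0.0    -- start of linear warmup
#   lr(20_000)   # 3e-5   -- peak at the warmup boundary
#   lr(100_000)  # ~1e-7  -- end of linear decay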
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
    'position_salaries.csv'
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the polynomial regression results
def viz_polynomial():
    plt.scatter(X, y, color='red')
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color='blue')
    plt.title('Truth or Bluff (Polynomial Regression)')
    plt.xlabel('Position level')
    plt.ylabel('Salary')
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Return the torch activation module matching `act_fn`."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
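
# Example:
#
#   import torch
#   act = get_activation("silu")
#   y = act(torch.randn(2, 4))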
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['text', 'image', 'audio']
def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f'Invalid type requested: {input_type}')
    return inputs
def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f'Invalid output: {output}')
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'Unexpected keys: {unexpected_keys}'

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'Saving model and image processor to {pytorch_dump_folder_path}')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f'nielsr/{model_name}')
        image_processor.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 6 | 1 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the rightmost mismatching text position in the current window, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = 'ABAABA'
pattern = 'AB'

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions) | 6 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    """Construct a CANINE tokenizer: text is split into characters, and each character maps to its Unicode codepoint."""

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, model_max_length=model_max_length, **kwargs, )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f'invalid token: \'{token}\'')

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f'invalid id: {index}')

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file; characters map directly to codepoints.
        return ()
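
# A small illustration of the character-level scheme above: every character maps to
# its Unicode codepoint, wrapped in the private-use CLS/SEP codepoints defined earlier
# in this file:
#
#   tokenizer = CanineTokenizer()
#   tokenizer.build_inputs_with_special_tokens([ord(c) for c in "hi"])
#   # -> [57344, 104, 105, 57345]  i.e. [CLS, 'h', 'i', SEP]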
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.')
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
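
# Minimal usage sketch (hypothetical feature set, illustrative only):
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # binds the concrete ClassLabel
#   task.column_mapping  # {'text': 'text', 'labels': 'labels'}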
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true", MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # the exact env-var spellings below are an assumption about the accelerate
            # version under test; the numeric value matches the ShardingStrategy enum
            env["FSDP_SHARDING_STRATEGY"] = f'{i + 1}'
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class UpperCamelCase_ ( UpperCamelCase__ ):
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = 0.8_2
SCREAMING_SNAKE_CASE__ = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
SCREAMING_SNAKE_CASE__ = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = 160
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def _snake_case ( self :Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_performance.py""" )
SCREAMING_SNAKE_CASE__ = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
SCREAMING_SNAKE_CASE__ = cmd.copy()
for i, strategy in enumerate(__A ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
SCREAMING_SNAKE_CASE__ = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(__A ):
SCREAMING_SNAKE_CASE__ = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
SCREAMING_SNAKE_CASE__ = len(__A )
for state_dict_type in FSDP_STATE_DICT_TYPE:
SCREAMING_SNAKE_CASE__ = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
SCREAMING_SNAKE_CASE__ = cmd_config[:-1]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
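# For reference, one configuration exercised by the tests above expands to a launch
# command roughly like the following (the paths and the performance bound are
# illustrative, not values taken from this file):
#
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 --use_fsdp \
#       --fsdp_sharding_strategy=1 --mixed_precision=fp16 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#       test_performance.py --output_dir=/tmp/fsdp_run --performance_lower_bound=0.8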
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
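# A minimal sketch of using the image processor under test outside the test suite.
# The 10-frame random video and the explicit size/crop_size values mirror the tester
# defaults above and are illustrative, not requirements of the API:
#
#   import numpy as np
#   from transformers import VivitImageProcessor
#
#   processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
#   video = [np.random.randint(0, 256, (3, 24, 24), dtype=np.uint8) for _ in range(10)]
#   pixel_values = processor(video, return_tensors="pt").pixel_values
#   # expected shape: (1, num_frames, num_channels, crop_height, crop_width)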
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample, applied in the main path of residual blocks.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
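# A quick usage sketch of the stochastic-depth helper above (shape and drop rate
# are illustrative, not taken from the source):
#
#   x = torch.ones(4, 16, 8, 8)
#   out = drop_path(x, drop_prob=0.1, training=True)
#   # Per-sample paths are zeroed with probability 0.1; survivors are rescaled by
#   # 1 / keep_prob so the expected activation is unchanged.
#   assert out.shape == x.shape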
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group; input tensor has shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the module acts as a residual token mixer
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
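# A quick wiring check for the encoder above on random data (a sketch using a
# default PoolFormerConfig; the resulting feature-map size is an assumption based
# on the s12 defaults and is not asserted anywhere in this module):
#
#   config = PoolFormerConfig()
#   encoder = PoolFormerEncoder(config)
#   feats = encoder(torch.randn(1, config.num_channels, 224, 224)).last_hidden_state
#   # with the default layout this should match _EXPECTED_OUTPUT_SHAPE, (1, 512, 7, 7)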
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
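# End-to-end inference sketch for the classification head above. The checkpoint is
# the one referenced by _IMAGE_CLASS_CHECKPOINT; the image path is a placeholder:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(Image.open("cat.png"), return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])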
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 6 |
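# Example invocation of this conversion script (file names are placeholders, not
# paths from the source):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/transfo_xl_ckpt \
#       --transfo_xl_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output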
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
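# The tester above is normally driven by the unittest class below, but it can also
# be exercised by hand for quick debugging; a sketch (the `parent` argument only
# needs to provide the assert helpers, so a TestCase stand-in suffices):
#
#   class _Parent(unittest.TestCase):
#       def runTest(self):
#           pass
#
#   tester = FlaubertModelTester(_Parent())
#   tester.create_and_check_flaubert_model(*tester.prepare_config_and_inputs())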
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
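# A feature-extraction sketch mirroring the integration test above with real text.
# The tokenizer usage follows the standard transformers API and is an assumption;
# this test file itself only feeds raw token ids:
#
#   from transformers import FlaubertTokenizer
#
#   tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   inputs = tokenizer("Le chat dort.", return_tensors="pt")
#   last_hidden = model(**inputs)[0]  # (1, seq_len, 768)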
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression = None  # compression type in fsspec. ex: "gzip"
    extension = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()
    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
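# Usage sketch: once these classes are registered with fsspec (the datasets library
# does this at import time; the explicit registration call below is an assumption
# for a standalone run), a compressed file can be read transparently:
#
#   import fsspec
#
#   fsspec.register_implementation("gzip", GzipFileSystem)
#   with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#       data = f.read()  # decompressed bytes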
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 6 | 1 |
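# ------------------------------------------------------------------
# Minimal sketch (not from the source) of the `Accelerator.accumulate`
# pattern the tests above exercise. The toy model, optimizer and data
# are illustrative stand-ins; only the context-manager usage follows
# the documented `accelerate` API.
# ------------------------------------------------------------------
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = DataLoader(TensorDataset(torch.randn(8, 1), torch.randn(8, 1)), batch_size=2)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Gradients are synchronized and applied only every second batch.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()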
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "LayoutLMv3ImageProcessor"
lowerCamelCase_ = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self :Dict , __A :Union[str, Any]=None , __A :Union[str, Any]=None , **__A :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :Dict , __A :List[str] , __A :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A :Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __A :Union[List[List[int]], List[List[List[int]]]] = None , __A :Optional[Union[List[int], List[List[int]]]] = None , __A :bool = True , __A :Union[bool, str, PaddingStrategy] = False , __A :Union[bool, str, TruncationStrategy] = None , __A :Optional[int] = None , __A :int = 0 , __A :Optional[int] = None , __A :Optional[bool] = None , __A :Optional[bool] = None , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = False , __A :bool = True , __A :Optional[Union[str, TensorType]] = None , **__A :Optional[Any] , ) -> BatchEncoding:
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
# first, apply the image processor
SCREAMING_SNAKE_CASE__ = self.image_processor(images=__A , return_tensors=__A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__A , __A ):
SCREAMING_SNAKE_CASE__ = [text] # add batch dimension (as the image processor always adds a batch dimension)
SCREAMING_SNAKE_CASE__ = features["""words"""]
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text=text if text is not None else features["""words"""] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["""boxes"""] , word_labels=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel values
SCREAMING_SNAKE_CASE__ = features.pop("""pixel_values""" )
if return_overflowing_tokens is True:
SCREAMING_SNAKE_CASE__ = self.get_overflowing_images(__A , encoded_inputs["""overflow_to_sample_mapping"""] )
SCREAMING_SNAKE_CASE__ = images
return encoded_inputs
def _snake_case ( self :str , __A :Optional[int] , __A :int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__A ) != len(__A ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
f''' {len(__A )} and {len(__A )}''' )
return images_with_overflow
def _snake_case ( self :List[Any] , *__A :int , **__A :Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Tuple , *__A :int , **__A :str ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _snake_case ( self :List[Any] ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Dict ) -> Any:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
return self.image_processor | 6 |
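# ------------------------------------------------------------------
# Usage sketch (not from the source): the processor couples the image
# processor (pixel values plus OCR words/boxes) with the tokenizer.
# The checkpoint and image path are illustrative; the default
# `apply_ocr=True` setting additionally requires pytesseract.
# ------------------------------------------------------------------
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
image = Image.open("document.png").convert("RGB")
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values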
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self , image_processor , tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            # attach the image features to the tokenizer output before returning
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 | 1 |
import argparse
import copy
def generate_neighbours( path ):
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution( path , dict_of_neighbours ):
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood( solution , dict_of_neighbours ):
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main( args=None ):
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args()) | 6 |
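# ------------------------------------------------------------------
# Minimal driver (not from the source). The expected input is an edge
# list, "<node_a> <node_b> <distance>" per line, and the first character
# of the file doubles as the start node. The complete graph below and
# the iteration/tabu-size values are illustrative; the equivalent CLI
# would be `python tabu_search.py -f graph.txt -i 10 -s 3` (script name
# assumed).
# ------------------------------------------------------------------
import tempfile

edges = "a b 20\na c 18\na d 22\nb c 10\nb d 25\nc d 8\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write(edges)
    graph_path = tmp.name

neighbours = generate_neighbours(graph_path)
first_solution, first_distance = generate_first_solution(graph_path, neighbours)
best_solution, best_cost = tabu_search(first_solution, first_distance, neighbours, 10, 3)
print(best_solution, best_cost)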
def SCREAMING_SNAKE_CASE__ ( arr: list ):
    n = len(arr )
    s = sum(arr )
    # dp[i][j] is True when some subset of the first i items sums to exactly j
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # either reuse the subsets of the first i - 1 items ...
            dp[i][j] = dp[i - 1][j]
            # ... or additionally place arr[i - 1] into the subset
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
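# ------------------------------------------------------------------
# Worked example (not from the source): [1, 6, 11, 5] splits into
# {1, 5, 6} and {11}, so the minimum difference is |12 - 11| = 1.
# Within this snippet the partition function above is the only def
# bound to the placeholder name, so the alias resolves to it.
# ------------------------------------------------------------------
find_min = SCREAMING_SNAKE_CASE__
assert find_min([1, 6, 11, 5]) == 1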
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
_lowerCamelCase = TypeVar('KT')
_lowerCamelCase = TypeVar('VT')
class UpperCamelCase_ ( Generic[KT, VT] ):
def __init__( self :List[str] , __A :KT | str = "root" , __A :VT | None = None ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = key
SCREAMING_SNAKE_CASE__ = value
SCREAMING_SNAKE_CASE__ = []
def __repr__( self :Optional[Any] ) -> str:
"""simple docstring"""
return f'''Node({self.key}: {self.value})'''
@property
def _snake_case ( self :str ) -> int:
"""simple docstring"""
return len(self.forward )
class UpperCamelCase_ ( Generic[KT, VT] ):
def __init__( self :Union[str, Any] , __A :float = 0.5 , __A :int = 16 ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = Node[KT, VT]()
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = p
SCREAMING_SNAKE_CASE__ = max_level
def __str__( self :Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = list(self )
if len(__A ) == 0:
return f'''SkipList(level={self.level})'''
SCREAMING_SNAKE_CASE__ = max((len(str(__A ) ) for item in items) , default=4 )
SCREAMING_SNAKE_CASE__ = max(__A , 4 ) + 4
SCREAMING_SNAKE_CASE__ = self.head
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = node.forward.copy()
lines.append(f'''[{node.key}]'''.ljust(__A , """-""" ) + """* """ * len(__A ) )
lines.append(""" """ * label_size + """| """ * len(__A ) )
while len(node.forward ) != 0:
SCREAMING_SNAKE_CASE__ = node.forward[0]
lines.append(
f'''[{node.key}]'''.ljust(__A , """-""" )
+ """ """.join(str(n.key ) if n.key == node.key else """|""" for n in forwards ) )
lines.append(""" """ * label_size + """| """ * len(__A ) )
SCREAMING_SNAKE_CASE__ = node.forward
lines.append("""None""".ljust(__A ) + """* """ * len(__A ) )
return f'''SkipList(level={self.level})\n''' + "\n".join(__A )
def __iter__( self :Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
SCREAMING_SNAKE_CASE__ = node.forward[0]
def _snake_case ( self :str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _snake_case ( self :Dict , __A :Optional[int] ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
SCREAMING_SNAKE_CASE__ = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__A )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _snake_case ( self :Optional[int] , __A :KT ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._locate_node(__A )
if node is not None:
for i, update_node in enumerate(__A ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
SCREAMING_SNAKE_CASE__ = node.forward[i]
else:
SCREAMING_SNAKE_CASE__ = update_node.forward[:i]
def _snake_case ( self :List[Any] , __A :KT , __A :VT ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._locate_node(__A )
if node is not None:
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __A ):
update_vector.append(self.head )
SCREAMING_SNAKE_CASE__ = level
SCREAMING_SNAKE_CASE__ = Node(__A , __A )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(__A )
else:
SCREAMING_SNAKE_CASE__ = new_node
def _snake_case ( self :Optional[Any] , __A :VT ) -> VT | None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._locate_node(__A )
if node is not None:
return node.value
return None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key1""" , 3 )
skip_list.insert("""Key2""" , 12 )
skip_list.insert("""Key3""" , 41 )
skip_list.insert("""Key4""" , -19 )
SCREAMING_SNAKE_CASE__ = skip_list.head
SCREAMING_SNAKE_CASE__ = {}
while node.level != 0:
SCREAMING_SNAKE_CASE__ = node.forward[0]
SCREAMING_SNAKE_CASE__ = node.value
assert len(UpperCamelCase__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key1""" , 10 )
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""Key5""" , 7 )
skip_list.insert("""Key7""" , 10 )
skip_list.insert("""Key10""" , 5 )
skip_list.insert("""Key7""" , 7 )
skip_list.insert("""Key5""" , 5 )
skip_list.insert("""Key10""" , 10 )
SCREAMING_SNAKE_CASE__ = skip_list.head
SCREAMING_SNAKE_CASE__ = {}
while node.level != 0:
SCREAMING_SNAKE_CASE__ = node.forward[0]
SCREAMING_SNAKE_CASE__ = node.value
if len(UpperCamelCase__ ) != 4:
print()
assert len(UpperCamelCase__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
assert skip_list.find("""Some key""" ) is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key2""" , 20 )
assert skip_list.find("""Key2""" ) == 20
skip_list.insert("""Some Key""" , 10 )
skip_list.insert("""Key2""" , 8 )
skip_list.insert("""V""" , 13 )
assert skip_list.find("""Y""" ) is None
assert skip_list.find("""Key2""" ) == 8
assert skip_list.find("""Some Key""" ) == 10
assert skip_list.find("""V""" ) == 13
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.delete("""Some key""" )
assert len(skip_list.head.forward ) == 0
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""Key2""" ) is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 14 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""V""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) == 14
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""X""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) == 12
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key1""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) == 15
skip_list.delete("""Key2""" )
assert skip_list.find("""V""" ) is None
assert skip_list.find("""X""" ) is None
assert skip_list.find("""Key1""" ) is None
assert skip_list.find("""Key2""" ) is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert("""Key1""" , 12 )
skip_list.insert("""V""" , 13 )
skip_list.insert("""X""" , 142 )
skip_list.insert("""Key2""" , 15 )
skip_list.delete("""X""" )
def traverse_keys(UpperCamelCase__: List[Any] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(UpperCamelCase__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def SCREAMING_SNAKE_CASE__ ( ):
def is_sorted(UpperCamelCase__: str ):
return all(next_item >= item for item, next_item in zip(UpperCamelCase__ , lst[1:] ) )
SCREAMING_SNAKE_CASE__ = SkipList()
for i in range(10 ):
skip_list.insert(UpperCamelCase__ , UpperCamelCase__ )
assert is_sorted(list(UpperCamelCase__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(UpperCamelCase__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(UpperCamelCase__ ) )
def SCREAMING_SNAKE_CASE__ ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = SkipList()
skip_list.insert(2 , """2""" )
skip_list.insert(4 , """4""" )
skip_list.insert(6 , """4""" )
skip_list.insert(4 , """5""" )
skip_list.insert(8 , """4""" )
skip_list.insert(9 , """4""" )
skip_list.delete(4 )
print(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 6 |
def SCREAMING_SNAKE_CASE__ ( mass: float , velocity: float ):
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""" )
    return 0.5 * mass * abs(velocity ) * abs(velocity )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 6 | 1 |
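# ------------------------------------------------------------------
# Worked example (not from the source): a 10 kg body moving at 5 m/s
# in either direction carries E_k = 0.5 * 10 * 5**2 = 125 J. The alias
# resolves to the single definition above within this snippet.
# ------------------------------------------------------------------
kinetic_energy = SCREAMING_SNAKE_CASE__
assert kinetic_energy(10, -5) == 125.0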
import operator
def strand_sort( arr: list , reverse: bool = False , solution: list | None = None ):
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1] | 6 |
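# ------------------------------------------------------------------
# Illustrative first-pass trace (not from the source): strand_sort
# repeatedly pulls an increasing "strand" out of the input, merges it
# into the solution, and recurses on the remainder.
# ------------------------------------------------------------------
data = [4, 3, 5, 1, 2]
strand, rest = [data[0]], data[1:]
for item in list(rest):          # iterate over a copy for clarity
    if item > strand[-1]:
        strand.append(item)
        rest.remove(item)
assert strand == [4, 5] and rest == [3, 1, 2]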
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "encoder-decoder"
lowerCamelCase_ = True
def __init__( self :Optional[int] , **__A :str ) -> int:
"""simple docstring"""
super().__init__(**__A )
assert (
"encoder" in kwargs and "decoder" in kwargs
), "Config has to be initialized with encoder and decoder config"
SCREAMING_SNAKE_CASE__ = kwargs.pop("""encoder""" )
SCREAMING_SNAKE_CASE__ = encoder_config.pop("""model_type""" )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""decoder""" )
SCREAMING_SNAKE_CASE__ = decoder_config.pop("""model_type""" )
from ..auto.configuration_auto import AutoConfig
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = AutoConfig.for_model(__A , **__A )
SCREAMING_SNAKE_CASE__ = True
@classmethod
def _snake_case ( cls :str , __A :PretrainedConfig , __A :PretrainedConfig , **__A :List[str] ) -> PretrainedConfig:
"""simple docstring"""
logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__A )
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.encoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.decoder.to_dict()
SCREAMING_SNAKE_CASE__ = self.__class__.model_type
return output | 6 | 1 |
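# ------------------------------------------------------------------
# Usage sketch (not from the source): building the composite config
# from two sub-configs. BERT is an illustrative choice of sub-model;
# upstream, the classmethod defined above is
# `EncoderDecoderConfig.from_encoder_decoder_configs`.
# ------------------------------------------------------------------
from transformers import BertConfig, EncoderDecoderConfig

config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
assert config.decoder.is_decoder and config.decoder.add_cross_attention
serialized = config.to_dict()  # nested encoder/decoder dicts plus model_type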
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared( vector: ndarray ):
    return np.dot(vector , vector )
class UpperCamelCase_ :
    def __init__( self :int , * , regularization: float = np.inf , kernel: str = "linear" , gamma: float = 0.0 ) -> None:
        """simple docstring"""
        self.regularization = regularization
        self.gamma = gamma
if kernel == "linear":
            self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma , (float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
            self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
            msg = f'''Unknown kernel: {kernel}'''
            raise ValueError(msg )
    def __linear( self :Optional[Any] , vectora: ndarray , vectorb: ndarray ) -> float:
        """simple docstring"""
        return np.dot(vectora , vectorb )
    def __rbf( self :Union[str, Any] , vectora: ndarray , vectorb: ndarray ) -> float:
        """simple docstring"""
        return np.exp(-(self.gamma * norm_squared(vectora - vectorb )) )
    def fit( self :Union[str, Any] , observations: list[ndarray] , classes: ndarray ) -> None:
        """simple docstring"""
        self.observations = observations
        self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
        ((n) , ) = np.shape(classes )
        def to_minimize(candidate: ndarray ) -> float:
            s = 0
            ((n) , ) = np.shape(candidate )
            for i in range(n ):
                for j in range(n ):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i] , observations[j] )
                    )
            return 1 / 2 * s - sum(candidate )
        ly_contraint = LinearConstraint(classes , 0 , 0 )
        l_bounds = Bounds(0 , self.regularization )
        l_star = minimize(
            to_minimize , np.ones(n ) , bounds=l_bounds , constraints=[ly_contraint] ).x
        self.optimum = l_star
# calculating mean offset of separation plane to points
        s = 0
        for i in range(n ):
            for j in range(n ):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i] , observations[j] )
        self.offset = s / n
    def predict( self :Optional[int] , observation: ndarray ) -> int:
        """simple docstring"""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n] , observation )
            for n in range(len(self.classes ) ) )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
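# ------------------------------------------------------------------
# Minimal end-to-end sketch (not from the source): a linearly separable
# toy problem. The class name is this snippet's placeholder; the data,
# regularization value and queries are illustrative, and the optimizer
# output is numerical rather than guaranteed.
# ------------------------------------------------------------------
xs = [np.array([0.0, 1.0]), np.array([1.0, 0.0]),
      np.array([0.0, 2.0]), np.array([2.0, 0.0])]
ys = np.array([1.0, -1.0, 1.0, -1.0])
svc = UpperCamelCase_(regularization=10.0, kernel="linear")
svc.fit(xs, ys)
print(svc.predict(np.array([0.0, 3.0])))  # expected: 1
print(svc.predict(np.array([3.0, 0.0])))  # expected: -1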
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class UpperCamelCase_ ( UpperCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
lowerCamelCase_ = Features({"text": Value("string" )} )
lowerCamelCase_ = Features({"labels": ClassLabel} )
lowerCamelCase_ = "text"
lowerCamelCase_ = "labels"
def _snake_case ( self :Any , __A :Dict ) -> Optional[Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self )
SCREAMING_SNAKE_CASE__ = self.label_schema.copy()
SCREAMING_SNAKE_CASE__ = features[self.label_column]
SCREAMING_SNAKE_CASE__ = label_schema
return task_template
@property
def _snake_case ( self :str ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
} | 6 | 1 |
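# ------------------------------------------------------------------
# Usage sketch (not from the source): upstream, the method defined
# above is `align_with_features`; it copies the dataset's ClassLabel
# into the template's label schema. The feature layout is illustrative.
# ------------------------------------------------------------------
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
task = task.align_with_features(features)
assert task.label_schema["labels"].names == ["neg", "pos"]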
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = "microsoft/speecht5_tts"
lowerCamelCase_ = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
lowerCamelCase_ = "text_reader"
lowerCamelCase_ = SpeechTaProcessor
lowerCamelCase_ = SpeechTaForTextToSpeech
lowerCamelCase_ = SpeechTaHifiGan
lowerCamelCase_ = ["text"]
lowerCamelCase_ = ["audio"]
def _snake_case ( self :Dict ) -> List[str]:
"""simple docstring"""
if self.post_processor is None:
SCREAMING_SNAKE_CASE__ = """microsoft/speecht5_hifigan"""
super().setup()
def _snake_case ( self :List[Any] , __A :Union[str, Any] , __A :Optional[Any]=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.pre_processor(text=__A , return_tensors="""pt""" , truncation=__A )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
SCREAMING_SNAKE_CASE__ = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
SCREAMING_SNAKE_CASE__ = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _snake_case ( self :Any , __A :Tuple ) -> Optional[Any]:
"""simple docstring"""
with torch.no_grad():
return self.model.generate_speech(**__A )
def _snake_case ( self :int , __A :int ) -> List[Any]:
"""simple docstring"""
with torch.no_grad():
return self.post_processor(__A ).cpu().detach() | 6 |
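# ------------------------------------------------------------------
# Usage sketch (not from the source): agent tools are callable, and the
# encode/forward/decode hooks above run in sequence. The import path
# below is an assumption about the `transformers` agents API, and the
# default speaker embedding additionally requires `datasets`.
# ------------------------------------------------------------------
from transformers.tools import TextToSpeechTool

tool = TextToSpeechTool()
waveform = tool("Hello, world.")  # 1-D float tensor of audio samples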
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = model.config
SCREAMING_SNAKE_CASE__ = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
SCREAMING_SNAKE_CASE__ = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
if "encoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
SCREAMING_SNAKE_CASE__ = """encoder.""" + name
if "attn.proj" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
SCREAMING_SNAKE_CASE__ = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
SCREAMING_SNAKE_CASE__ = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
SCREAMING_SNAKE_CASE__ = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[int] ):
for key in orig_state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
SCREAMING_SNAKE_CASE__ = key.split(""".""" )
SCREAMING_SNAKE_CASE__ = int(key_split[3] )
SCREAMING_SNAKE_CASE__ = int(key_split[5] )
SCREAMING_SNAKE_CASE__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
SCREAMING_SNAKE_CASE__ = val[:dim, :]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2, :]
SCREAMING_SNAKE_CASE__ = val[-dim:, :]
else:
SCREAMING_SNAKE_CASE__ = val[:dim]
SCREAMING_SNAKE_CASE__ = val[dim : dim * 2]
SCREAMING_SNAKE_CASE__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
SCREAMING_SNAKE_CASE__ = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int=None , UpperCamelCase__: str=False ):
# load original model
SCREAMING_SNAKE_CASE__ = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_configs(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutSwinModel(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE__ = original_model.state_dict()
SCREAMING_SNAKE_CASE__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/example-documents""" )
SCREAMING_SNAKE_CASE__ = dataset["""test"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
SCREAMING_SNAKE_CASE__ = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
SCREAMING_SNAKE_CASE__ = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
SCREAMING_SNAKE_CASE__ = """When is the coffee break?"""
SCREAMING_SNAKE_CASE__ = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
SCREAMING_SNAKE_CASE__ = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
SCREAMING_SNAKE_CASE__ = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
SCREAMING_SNAKE_CASE__ = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
SCREAMING_SNAKE_CASE__ = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
SCREAMING_SNAKE_CASE__ = """hello world"""
else:
raise ValueError("""Model name not supported""" )
SCREAMING_SNAKE_CASE__ = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
SCREAMING_SNAKE_CASE__ = original_model.encoder.model.patch_embed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
# verify encoder hidden states
SCREAMING_SNAKE_CASE__ = original_model.encoder(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
# verify decoder hidden states
SCREAMING_SNAKE_CASE__ = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
SCREAMING_SNAKE_CASE__ = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
_lowerCamelCase = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 6 | 1 |
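# ------------------------------------------------------------------
# Programmatic equivalent (not from the source) of the CLI above. The
# entry point is named per the call in the __main__ block; the output
# directory is an illustrative choice, and the run needs the original
# `donut` package plus network access for the checkpoint.
# ------------------------------------------------------------------
convert_donut_checkpoint(
    "naver-clova-ix/donut-base-finetuned-docvqa",  # model_name
    "./donut-docvqa",                              # pytorch_dump_folder_path
    False,                                         # push_to_hub
)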
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = ShapEPipeline
lowerCamelCase_ = ["prompt"]
lowerCamelCase_ = ["prompt"]
lowerCamelCase_ = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
lowerCamelCase_ = False
@property
def _snake_case ( self :Union[str, Any] ) -> Any:
"""simple docstring"""
return 32
@property
def _snake_case ( self :str ) -> List[Any]:
"""simple docstring"""
return 32
@property
def _snake_case ( self :Tuple ) -> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
return 8
@property
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _snake_case ( self :Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
SCREAMING_SNAKE_CASE__ = PriorTransformer(**__A )
return model
@property
def _snake_case ( self :str ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
SCREAMING_SNAKE_CASE__ = ShapERenderer(**__A )
return model
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dummy_prior
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_renderer
SCREAMING_SNAKE_CASE__ = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=__A , clip_sample=__A , clip_sample_range=1.0 , )
SCREAMING_SNAKE_CASE__ = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _snake_case ( self :List[str] , __A :List[Any] , __A :List[Any]=0 ) -> str:
"""simple docstring"""
if str(__A ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(__A )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(__A )
SCREAMING_SNAKE_CASE__ = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _snake_case ( self :Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs(__A ) )
SCREAMING_SNAKE_CASE__ = output.images[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :Any ) -> Dict:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch_device == """cpu"""
SCREAMING_SNAKE_CASE__ = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__A , relax_max_difference=__A , )
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__A )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
SCREAMING_SNAKE_CASE__ = batch_size * [inputs[key]]
SCREAMING_SNAKE_CASE__ = pipe(**__A , num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :str ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
SCREAMING_SNAKE_CASE__ = ShapEPipeline.from_pretrained("""openai/shap-e""" )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
"""a shark""" , generator=__A , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__A , __A ) | 6 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 6 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = SwinConfig.from_pretrained(
"""microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
SCREAMING_SNAKE_CASE__ = MaskFormerConfig(backbone_config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = """huggingface/label-files"""
if "ade20k-full" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 847
SCREAMING_SNAKE_CASE__ = """maskformer-ade20k-full-id2label.json"""
elif "ade" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 150
SCREAMING_SNAKE_CASE__ = """ade20k-id2label.json"""
elif "coco-stuff" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 171
SCREAMING_SNAKE_CASE__ = """maskformer-coco-stuff-id2label.json"""
elif "coco" in model_name:
# TODO
SCREAMING_SNAKE_CASE__ = 133
SCREAMING_SNAKE_CASE__ = """coco-panoptic-id2label.json"""
elif "cityscapes" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 19
SCREAMING_SNAKE_CASE__ = """cityscapes-id2label.json"""
elif "vistas" in model_name:
# this should be ok
SCREAMING_SNAKE_CASE__ = 65
SCREAMING_SNAKE_CASE__ = """mapillary-vistas-id2label.json"""
SCREAMING_SNAKE_CASE__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
    SCREAMING_SNAKE_CASE__ = {int(k ): v for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
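
# Minimal sanity check for `rename_key` (toy dict; the real keys come from the
# Detectron2 checkpoint). Illustrative helper only, not part of the conversion.
def _demo_rename_key():
    d = {"backbone.patch_embed.proj.weight": 1.0}
    rename_key(d, "backbone.patch_embed.proj.weight", "projection.weight")
    assert d == {"projection.weight": 1.0}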
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
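
# Minimal sketch of the fused-QKV split performed above, on a toy tensor (sizes
# are illustrative). A fused in_proj matrix of shape (3*dim, dim) stacks the
# query, key and value projections along dim 0, in that order, so slicing recovers them.
def _demo_qkv_split():
    dim = 4
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)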
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert.',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 6 |
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
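
# Hand-checkable sanity values for `solution` (illustrative helper): 13195 factors
# as 5 * 7 * 13 * 29, so its largest prime factor is 29.
def _demo_solution():
    assert solution(13_195) == 29
    assert solution(17) == 17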
if __name__ == "__main__":
print(F'''{solution() = }''') | 6 | 1 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 6 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant)) | 6 | 1 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 6 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
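
# Hand-checkable example (illustrative helper): 2010-01-31 fell on a Sunday, so
# Zeller's congruence above yields f == 0 and the response names Sunday.
def _demo_zeller():
    assert "Sunday" in zeller("01-31-2010")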
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input) | 6 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False,
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False,
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists()) | 6 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL) | 6 | 1 |
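# Hedged sketch of how such token counts are typically consumed for MLM mask
# smoothing (XLM/word2vec style): sampling weights proportional to count**(-alpha),
# which up-weights rare tokens. The helper name and alpha value are illustrative only.
def _demo_mlm_smoothing(counts, alpha=0.7):
    import numpy as np
    freqs = np.maximum(np.array(counts, dtype=np.float64), 1) ** -alpha
    return freqs / freqs.sum()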
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
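
# Worked example (illustrative helper): a root holding 3 coins with two empty
# children needs 2 moves, one coin passed to each child.
def _demo_distribute_coins():
    root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(root) == 2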
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 | 1 |
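# Illustrative use of the lazy init above (requires `transformers` installed):
# the submodule is only imported when the attribute is first accessed.
def _demo_lazy_import():
    from transformers import SpeechEncoderDecoderConfig
    return SpeechEncoderDecoderConfig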
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3)) | 6 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor | 6 | 1 |
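# Standalone sketch of the query padding done in OwlViTProcessor.__call__ above:
# each sample in a nested text batch is padded with " " entries up to the largest
# query count, e.g. [["a cat"], ["a dog", "a remote"]] -> [["a cat", " "], ["a dog", "a remote"]].
def _demo_pad_queries(text):
    max_num_queries = max(len(t) for t in text)
    return [t + [" "] * (max_num_queries - len(t)) for t in text]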
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
) | 6 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec) | 6 | 1 |
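# Tiny smoke test for the VQModel above (config values chosen only so it runs
# quickly on CPU; illustrative, not a recommended configuration).
def _demo_vq_roundtrip():
    import torch
    model = VQModel(block_out_channels=(8,), norm_num_groups=4, num_vq_embeddings=16)
    sample = torch.randn(1, 3, 32, 32)
    assert model(sample).sample.shape == sample.shape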
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)
@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0 | 6 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
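
# Small numeric check of the one-hot cross-entropy used above (hand-checkable):
# with logits [0, 0] the log-softmax is log(0.5) for both classes, so the loss for
# any label is -log(0.5) ~= 0.693. Illustrative helper only.
def _demo_cross_entropy():
    logits = jnp.zeros((1, 2))
    labels = jnp.array([0])
    onehot = (labels[..., None] == jnp.arange(2)[None]).astype("f4")
    loss = -jnp.sum(onehot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    assert abs(float(loss[0]) - 0.6931) < 1e-3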
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
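
# Sketch of what `_fetch_inputs` does, on toy values (illustrative helper): token
# ids are padded with `pad_id` up to `max_length` and the attention mask extended
# with zeros over the padding.
def _demo_fetch_inputs():
    collator = DataCollator(pad_id=0, max_length=5)
    ids, mask = collator._fetch_inputs([7, 8, 9])
    assert ids == [7, 8, 9, 0, 0] and mask == [1, 1, 1, 0, 0]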
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
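

# Hedged sanity check for the schedule/optimizer builders above; the hyperparameters
# (1e-4 peak lr, 100 warmup steps, 1000 total steps, 1e-2 weight decay) are made up
# purely for illustration.
def _demo_build_tx():
    import jax.numpy as jnp  # local import so the sketch stays self-contained

    tx, lr = build_tx(lr=1e-4, init_lr=0.0, warmup_steps=100, num_train_steps=1000, weight_decay=1e-2)
    # The joined schedule warms up linearly to the peak lr, then decays towards 1e-7.
    assert jnp.isclose(lr(100), 1e-4)
    params = {"dense": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros(2)}}
    opt_state = tx.init(params)  # AdamW state; "bias" leaves are masked out of weight decay
    return opt_state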
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
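

# Quick illustration of the flat-index trick above (values chosen arbitrarily):
# labels [1, 0, 2] with 3 classes become rows of the 3x3 identity matrix.
def _demo_dense_to_one_hot():
    one_hot = _dense_to_one_hot(numpy.array([1, 0, 2]), 3)
    assert one_hot.tolist() == [[0, 1, 0], [1, 0, 0], [0, 0, 1]]
    return one_hot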
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
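

# Hedged usage sketch: calling the loader above downloads ~11MB of MNIST files on
# the first run, so this is illustrative only; the directory path below is made up.
def _demo_read_data_sets():
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    images, labels = mnist.train.next_batch(32)
    assert images.shape == (32, 784) and labels.shape == (32, 10)
    return images, labels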
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' ) | 6 | 1 |
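

# Illustrative smoke test for the dispatcher above (input values are arbitrary):
def _demo_get_activation():
    import torch

    act = get_activation("silu")
    out = act(torch.randn(2, 3))  # SiLU applied elementwise; the shape is preserved
    assert out.shape == (2, 3)
    return out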
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
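

# Hedged usage sketch: wrapping a plain PyTorch scheduler. With
# `step_with_optimizer=False` the wrapper delegates straight to the underlying
# scheduler, so no distributed launch is needed; the model/optimizer here are
# toy stand-ins.
def _demo_accelerated_scheduler():
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    base = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.5)
    scheduler = AcceleratedScheduler(base, optimizer, step_with_optimizer=False)
    optimizer.step()
    scheduler.step()  # halves the lr via StepLR
    return scheduler.get_last_lr()  # [0.05]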
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
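

# Tiny illustration of the pop-and-reassign rename above on a toy dict:
def _demo_rename_key():
    toy_state_dict = {"backbone.patch_embed.proj.weight": 0}
    rename_key(toy_state_dict, "backbone.patch_embed.proj.weight", "encoder.projection.weight")
    assert toy_state_dict == {"encoder.projection.weight": 0}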
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
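

# Standalone sketch of the fused-projection split performed above: one (3*h, h)
# `in_proj` matrix is sliced into equal query/key/value blocks. The tensors are
# random stand-ins, not real checkpoint weights.
def _demo_split_fused_qkv(hidden_size: int = 4):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    query = in_proj_weight[:hidden_size, :]
    key = in_proj_weight[hidden_size : hidden_size * 2, :]
    value = in_proj_weight[-hidden_size:, :]
    # The three slices tile the original matrix exactly:
    assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)
    return query, key, value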
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print("""Pushing model and image processor to the hub...""" )
model.push_to_hub(f'''nielsr/{model_name}''' )
image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
        help='Name of the MaskFormer model you\'d like to convert',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
) | 6 | 1 |
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        value_to_insert = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if value_to_insert < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = value_to_insert
    return collection
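

# Quick sanity checks for the sort above (inputs chosen arbitrarily):
assert binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([-3, 0, -3]) == [-3, -3, 0]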
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted)) | 6 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size
    def _tokenize(self, text: str) -> list:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
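

# Hedged usage sketch: CANINE tokenizes at the Unicode codepoint level, so no
# vocabulary files are needed and IDs are plain `ord()` values wrapped in
# [CLS]/[SEP]. Constructing the tokenizer bare should therefore work.
def _demo_canine_tokenizer():
    tokenizer = CanineTokenizer()
    input_ids = tokenizer("hi")["input_ids"]
    assert input_ids == [CLS, ord("h"), ord("i"), SEP]
    return input_ids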
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )
    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))
    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = mp_dtype
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = Accelerator()
if mp_dtype == "fp16":
SCREAMING_SNAKE_CASE__ = torch.floataa
elif mp_dtype == "bf16":
SCREAMING_SNAKE_CASE__ = torch.bfloataa
SCREAMING_SNAKE_CASE__ = MixedPrecision(param_dtype=__A , reduce_dtype=__A , buffer_dtype=__A )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __A )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler , __A ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(__A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
SCREAMING_SNAKE_CASE__ = self.dist_env.copy()
SCREAMING_SNAKE_CASE__ = str(__A ).lower()
with mockenv_context(**__A ):
SCREAMING_SNAKE_CASE__ = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__A ) )
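# Editor's aside, not part of the original test file: the mocked environment
# variables above map onto plugin fields. A minimal sketch of the equivalent
# direct construction (field names per accelerate's dataclass):
def _example_direct_plugin_config():
    from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, CPUOffload, ShardingStrategy

    plugin = FullyShardedDataParallelPlugin(
        sharding_strategy=ShardingStrategy.FULL_SHARD,
        backward_prefetch=BackwardPrefetch.BACKWARD_PRE,
        cpu_offload=CPUOffload(offload_params=True),
    )
    return Accelerator(fsdp_plugin=plugin)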
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])
    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ] )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ] )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy()) | 6 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}

class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0) | 6
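# Editor's aside, not part of the original file: a minimal sketch of using the
# config above; the `Swinv2Model` import is an assumption about the library.
def _example_swinv2_config():
    from transformers import Swinv2Config, Swinv2Model

    config = Swinv2Config(embed_dim=48, depths=[2, 2], num_heads=[3, 6])
    model = Swinv2Model(config)  # randomly initialized weights
    # hidden_size is derived: 48 * 2 ** (len(depths) - 1) == 96
    return model, config.hidden_size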
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig

logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'sail/poolformer_s12',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]

def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
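# Editor's aside, not part of the original file: drop_path (stochastic depth)
# keeps the output unbiased — each sample is either zeroed or rescaled by
# 1 / keep_prob, so the expectation of the output matches the input.
def _example_drop_path():
    x = torch.ones(4, 3)
    y = drop_path(x, drop_prob=0.5, training=True)
    # each row of y is either all 0.0 or all 2.0 (= 1 / 0.5), so E[y] == x
    return y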
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)

class PoolFormerEmbeddings(nn.Module):
    """Construct Patch Embeddings."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group Normalization with 1 group. Input: tensor in shape [B, C, H, W]."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)

class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True)

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i], stride=config.strides[i], padding=config.padding[i], num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1], hidden_size=config.hidden_sizes[i], ) )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config, num_channels=config.hidden_sizes[i], pool_size=config.pool_size, hidden_size=config.hidden_sizes[i], intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio), drop_path=dpr[cur + j], ) )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""
@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.", POOLFORMER_START_DOCSTRING, )
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output, hidden_states=encoder_outputs.hidden_states, )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    "\n    PoolFormer Model transformer with an image classification head on top\n    ", POOLFORMER_START_DOCSTRING, )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward( self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) | 6 | 1 |
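# Editor's aside, not part of the original file: a minimal inference sketch for
# the classification head above, using the checkpoint named in the docstring
# constants (`sail/poolformer_s12`).
def _example_poolformer_inference():
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, PoolFormerForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
    model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]  # e.g. "tabby, tabby cat"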
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/mask2former-swin-small-coco-instance': (
        'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)

class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__( self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs, ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}" )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, **kwargs, )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 6
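# Editor's aside, not part of the original file: deriving a config from a
# custom backbone via the classmethod above; the `SwinConfig` import is an
# assumption about the library.
def _example_mask2former_config():
    from transformers import Mask2FormerConfig, SwinConfig

    backbone = SwinConfig(depths=[2, 2, 6, 2])
    config = Mask2FormerConfig.from_backbone_config(backbone, num_queries=50)
    return config.backbone_config.model_type, config.num_queries  # ("swin", 50)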
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, )
    def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict

@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))

@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]])

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) | 6 | 1 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 6 |
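# Editor's aside, not part of the original file: a worked check of the formula
# above, E_k = 0.5 * m * v**2.
def _example_kinetic_energy():
    assert kinetic_energy(2, 3) == 9.0   # 0.5 * 2 * 3**2
    assert kinetic_energy(2, -3) == 9.0  # speed enters squared, sign is irrelevant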
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"

def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
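# Editor's aside, not part of the original script: the core pattern the tests
# below exercise — `no_sync` suppresses the DDP gradient all-reduce so
# micro-batch gradients accumulate locally until a synchronizing step runs.
def _example_no_sync_pattern(accelerator, ddp_model, dataloader):
    for i, batch in enumerate(dataloader):
        x, y = batch.values()
        if i % 2 == 0:
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, x, y, accelerator)  # grads stay local
        else:
            step_model(ddp_model, x, y, accelerator)  # grads are all-reduced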
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", "`split_batches=False`, `dispatch_batches=False`**", )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ", f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**", )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)

def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()

if __name__ == "__main__":
    main() | 6 | 1 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to torch.float16 for faster downloads and less disk space."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert) | 6 |
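

# Hedged usage sketch for `convert`: a round trip on a throwaway state dict. The
# file name below is made up for illustration; only core `torch.save`/`torch.load`
# semantics are assumed.
def _smoke_test_convert() -> None:
    import os
    import tempfile

    tiny = {"weight": torch.ones(2, 2), "bias": torch.zeros(2)}
    with tempfile.TemporaryDirectory() as tmp_dir:
        src = os.path.join(tmp_dir, "pytorch_model.bin")
        torch.save(tiny, src)
        convert(src)  # no save_path given, so src is overwritten in place
        halved = torch.load(src)
        assert all(v.dtype == torch.float16 for v in halved.values())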
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
return ["input_ids", "attention_mask", "pixel_values"] | 6 | 1 |
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
def find_min(arr: list) -> int:
    """Return the minimum possible difference between the sums of a two-way partition of arr."""
    n = len(arr)
    s = sum(arr)

    # dp[i][j] is True when some subset of the first i elements sums exactly to j
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]

    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # either skip arr[i - 1] ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    diff = s
    for j in range(s // 2, -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
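

# Hedged usage sketch: [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the best
# achievable subset-sum difference is |12 - 11| = 1. The inputs are illustrative.
if __name__ == "__main__":
    assert find_min([1, 6, 11, 5]) == 1
    assert find_min([5, 5, 5, 5]) == 0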
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute E_k = (1/2) * m * v^2; speed enters squared, so its sign is irrelevant."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank; return True if they were disjoint."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of a set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
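

# Hedged usage sketch: three sets of sizes 1, 1 and 2. Merging the first two yields
# a combined size of 2, and a repeated merge is reported as a no-op.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 2])
    assert ds.merge(0, 1) is True
    assert ds.merge(0, 1) is False  # already in the same set
    assert ds.max_set == 2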
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
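

# Hedged usage sketch: composing a config from two default BERT configs. Illustrative
# only; the relative imports above mean this module is not normally run directly.
if __name__ == "__main__":
    from transformers import BertConfig

    enc, dec = BertConfig(), BertConfig()
    cfg = EncoderDecoderConfig.from_encoder_decoder_configs(enc, dec)
    assert cfg.decoder.is_decoder and cfg.decoder.add_cross_attention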
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
mname = 'facebook/wmt19-en-de'
tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
mname_tiny = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """A pipeline for image super-resolution using latent diffusion (VQ-VAE + U-Net denoiser)."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
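

# Hedged usage sketch: end-to-end 4x upscaling with the pipeline above. The
# checkpoint id matches the public LDM x4 upscaler on the Hub, but the input
# file name is an assumption for illustration.
if __name__ == "__main__":
    low_res = PIL.Image.open("low_res.png").convert("RGB")  # hypothetical input image
    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("high_res.png")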
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the max_n-th convergent of the continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        # the continued fraction for e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
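    # Hedged sanity check from the Project Euler 65 statement: the digit sum of the
    # numerator of the 10th convergent of e is 1 + 4 + 5 + 7 = 17.
    assert solution(10) == 17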
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import os
def solution():
    """Return the total of all the name scores in p022_names.txt."""
    total_score = 0
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")

    names.sort()

    for i, name in enumerate(names):
        name_score = 0
        for letter in name:
            name_score += ord(letter) - 64  # 'A' -> 1, 'B' -> 2, ...
        total_score += (i + 1) * name_score

    return total_score


if __name__ == "__main__":
    print(solution())
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")

    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
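    # Hedged sanity check from the Project Euler 3 statement: the largest prime
    # factor of 13195 is 29.
    assert solution(13195) == 29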
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_vision_text_dual_encoder': ['VisionTextDualEncoderConfig'],
'processing_vision_text_dual_encoder': ['VisionTextDualEncoderProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: name the day of the week for a mm-dd-yyyy date string.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
)
parser.add_argument(
'--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
)
parser.add_argument(
'--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
)
parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)
logger.info('Counting occurrences for MLM.')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers with the gravity ("bead") sort algorithm."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "OwlViTImageProcessor"
lowerCamelCase_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self :Optional[Any] , __A :int=None , __A :Optional[int]=None , **__A :str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __A , )
SCREAMING_SNAKE_CASE__ = kwargs.pop("""feature_extractor""" )
SCREAMING_SNAKE_CASE__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__A , __A )
def __call__( self :str , __A :Dict=None , __A :List[str]=None , __A :str=None , __A :Optional[int]="max_length" , __A :Tuple="np" , **__A :int ) -> Tuple:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__A , __A ) or (isinstance(__A , __A ) and not isinstance(text[0] , __A )):
SCREAMING_SNAKE_CASE__ = [self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )]
elif isinstance(__A , __A ) and isinstance(text[0] , __A ):
SCREAMING_SNAKE_CASE__ = []
# Maximum number of queries across batch
SCREAMING_SNAKE_CASE__ = max([len(__A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__A ) != max_num_queries:
SCREAMING_SNAKE_CASE__ = t + [""" """] * (max_num_queries - len(__A ))
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , padding=__A , return_tensors=__A , **__A )
encodings.append(__A )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
SCREAMING_SNAKE_CASE__ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = input_ids
SCREAMING_SNAKE_CASE__ = attention_mask
if query_images is not None:
SCREAMING_SNAKE_CASE__ = BatchEncoding()
SCREAMING_SNAKE_CASE__ = self.image_processor(
__A , return_tensors=__A , **__A ).pixel_values
SCREAMING_SNAKE_CASE__ = query_pixel_values
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :List[Any] , *__A :Dict , **__A :Dict ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process(*__A , **__A )
def _snake_case ( self :Optional[int] , *__A :Dict , **__A :List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*__A , **__A )
def _snake_case ( self :str , *__A :List[str] , **__A :Union[str, Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*__A , **__A )
def _snake_case ( self :Dict , *__A :List[str] , **__A :List[str] ) -> int:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :Dict , *__A :Dict , **__A :List[str] ) -> str:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :List[Any] ) -> Optional[int]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __A , )
return self.image_processor_class
@property
def _snake_case ( self :Any ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __A , )
        return self.image_processor
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
_lowerCamelCase = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
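

# Hedged usage sketch: bundling the two factories above the way a fine-tuning
# script would before handing them to pl.Trainer(callbacks=...). The output
# directory and patience value are illustrative.
def build_callbacks(output_dir: str, metric: str = "rouge2", patience: int = 3) -> list:
    return [
        get_checkpoint_callback(output_dir, metric),
        get_early_stopping_callback(metric, patience),
    ]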
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_group = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lr_group)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
class VQModel ( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , in_channels :int = 3 , out_channels :int = 3 , down_block_types :Tuple[str] = ("DownEncoderBlock2D",) , up_block_types :Tuple[str] = ("UpDecoderBlock2D",) , block_out_channels :Tuple[int] = (64,) , layers_per_block :int = 1 , act_fn :str = "silu" , latent_channels :int = 3 , sample_size :int = 32 , num_vq_embeddings :int = 256 , norm_num_groups :int = 32 , vq_embed_dim :Optional[int] = None , scaling_factor :float = 0.18215 , norm_type :str = "group" , ):
        """simple docstring"""
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels , out_channels=latent_channels , down_block_types=down_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , double_z=False , )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels , vq_embed_dim , 1 )
        self.quantize = VectorQuantizer(num_vq_embeddings , vq_embed_dim , beta=0.25 , remap=None , sane_index_shape=False )
        self.post_quant_conv = nn.Conv2d(vq_embed_dim , latent_channels , 1 )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels , out_channels=out_channels , up_block_types=up_block_types , block_out_channels=block_out_channels , layers_per_block=layers_per_block , act_fn=act_fn , norm_num_groups=norm_num_groups , norm_type=norm_type , )
@apply_forward_hook
    def encode( self , x :torch.FloatTensor , return_dict :bool = True ) -> VQEncoderOutput:
        """simple docstring"""
        h = self.encoder(x )
        h = self.quant_conv(h )
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h )
@apply_forward_hook
    def decode( self , h :torch.FloatTensor , force_not_quantize :bool = False , return_dict :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        if not force_not_quantize:
            quant , emb_loss , info = self.quantize(h )
        else:
            quant = h
        quant_proj = self.post_quant_conv(quant )
        dec = self.decoder(quant_proj , quant if self.config.norm_type == """spatial""" else None )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
    def forward( self , sample :torch.FloatTensor , return_dict :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
        """simple docstring"""
        x = sample
        h = self.encode(x ).latents
        dec = self.decode(h ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec )
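# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of an encode/decode round trip through the `VQModel`
# defined above; the tensor shapes below are arbitrary examples.
#
# import torch
#
# model = VQModel()                              # defaults from @register_to_config
# images = torch.randn(1, 3, 64, 64)             # fake batch (B, C, H, W)
# latents = model.encode(images).latents         # pre-quantisation latents
# reconstruction = model.decode(latents).sample  # quantised + decoded
# assert reconstruction.shape == images.shape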
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and\n# uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule ( FlaxBigBirdForQuestionAnsweringModule ):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup( self ):
        """simple docstring"""
        super().setup()
        self.cls = nn.Dense(5 , dtype=self.dtype )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        outputs = super().__call__(*args , **kwargs )
        cls_out = self.cls(outputs[2] )
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions ( FlaxBigBirdForQuestionAnswering ):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq( start_logits , start_labels , end_logits , end_labels , pooled_logits , pooler_labels ):
    def cross_entropy( logits , labels , reduction=None ):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size )[None]).astype("""f4""" )
        logits = jax.nn.log_softmax(logits , axis=-1 )
        loss = -jnp.sum(labels * logits , axis=-1 )
        if reduction is not None:
            loss = reduction(loss )
        return loss
    cross_entropy = partial(cross_entropy , reduction=jnp.mean )
    start_loss = cross_entropy(start_logits , start_labels )
    end_loss = cross_entropy(end_logits , end_labels )
    pooled_loss = cross_entropy(pooled_logits , pooler_labels )
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 30_00
    save_steps: int = 1_05_00
    block_size: int = 1_28
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 2_00_00
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__( self ):
        os.makedirs(self.base_dir , exist_ok=True )
        self.save_dir = os.path.join(self.base_dir , self.save_dir )
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 40_96 # no dynamic padding on TPUs
    def __call__( self , features ):
        """simple docstring"""
        batch = self.collate_fn(features )
        batch = jax.tree_util.tree_map(shard , batch )
        return batch
    def collate_fn( self , features ):
        """simple docstring"""
        input_ids , attention_mask = self.fetch_inputs(features["""input_ids"""] )
        batch = {
            """input_ids""": jnp.array(input_ids , dtype=jnp.int32 ),
            """attention_mask""": jnp.array(attention_mask , dtype=jnp.int32 ),
            """start_labels""": jnp.array(features["""start_token"""] , dtype=jnp.int32 ),
            """end_labels""": jnp.array(features["""end_token"""] , dtype=jnp.int32 ),
            """pooled_labels""": jnp.array(features["""category"""] , dtype=jnp.int32 ),
        }
        return batch
    def fetch_inputs( self , input_ids: list ):
        """simple docstring"""
        inputs = [self._fetch_inputs(ids ) for ids in input_ids]
        return zip(*inputs )
    def _fetch_inputs( self , input_ids: list ):
        """simple docstring"""
        attention_mask = [1 for _ in range(len(input_ids ) )]
        while len(input_ids ) < self.max_length:
            input_ids.append(self.pad_id )
            attention_mask.append(0 )
        return input_ids, attention_mask
def get_batched_dataset( dataset , batch_size , seed=None ):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed )
    for i in range(len(dataset ) // batch_size ):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch )
@partial(jax.pmap , axis_name="""batch""" )
def train_step( state , drp_rng , **model_inputs ):
    def loss_fn(params ):
        start_labels = model_inputs.pop("""start_labels""" )
        end_labels = model_inputs.pop("""end_labels""" )
        pooled_labels = model_inputs.pop("""pooled_labels""" )
        outputs = state.apply_fn(**model_inputs , params=params , dropout_rng=drp_rng , train=True )
        start_logits , end_logits , pooled_logits = outputs
        return state.loss_fn(
            start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels , )
    drp_rng , new_drp_rng = jax.random.split(drp_rng )
    grad_fn = jax.value_and_grad(loss_fn )
    loss , grads = grad_fn(state.params )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    grads = jax.lax.pmean(grads , """batch""" )
    state = state.apply_gradients(grads=grads )
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="""batch""" )
def val_step( state , **model_inputs ):
    start_labels = model_inputs.pop("""start_labels""" )
    end_labels = model_inputs.pop("""end_labels""" )
    pooled_labels = model_inputs.pop("""pooled_labels""" )
    outputs = state.apply_fn(**model_inputs , params=state.params , train=False )
    start_logits , end_logits , pooled_logits = outputs
    loss = state.loss_fn(start_logits , start_labels , end_logits , end_labels , pooled_logits , pooled_labels )
    metrics = jax.lax.pmean({"""loss""": loss} , axis_name="""batch""" )
    return metrics
class TrainState ( train_state.TrainState ):
    loss_fn: Callable = struct.field(pytree_node=False )
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state( self , model , tx , num_train_steps , ckpt_dir=None ):
        """simple docstring"""
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__ , params=params , tx=tx , loss_fn=calculate_loss_for_nq , )
        if ckpt_dir is not None:
            params , opt_state , step , args , data_collator = restore_checkpoint(ckpt_dir , state )
            tx_args = {
                """lr""": args.lr,
                """init_lr""": args.init_lr,
                """warmup_steps""": args.warmup_steps,
                """num_train_steps""": num_train_steps,
                """weight_decay""": args.weight_decay,
            }
            tx , lr = build_tx(**tx_args )
            state = train_state.TrainState(
                step=step , apply_fn=model.__call__ , params=params , tx=tx , opt_state=opt_state , )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state )
        return state
    def train( self , state , tr_dataset , val_dataset ):
        """simple docstring"""
        args = self.args
        total = len(tr_dataset ) // args.batch_size
        rng = jax.random.PRNGKey(0 )
        drp_rng = jax.random.split(rng , jax.device_count() )
        for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0 , dtype=jnp.float32 )
            tr_dataloader = get_batched_dataset(tr_dataset , args.batch_size , seed=epoch )
            i = 0
            for batch in tqdm(tr_dataloader , total=total , desc=f'''Running EPOCH-{epoch}''' ):
                batch = self.data_collator(batch )
                state , metrics , drp_rng = self.train_step_fn(state , drp_rng , **batch )
                running_loss += jax_utils.unreplicate(metrics["""loss"""] )
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step )
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1 )
                    eval_loss = self.evaluate(state , val_dataset )
                    logging_dict = {
                        """step""": state_step.item(),
                        """eval_loss""": eval_loss.item(),
                        """tr_loss""": tr_loss,
                        """lr""": lr.item(),
                    }
                    tqdm.write(str(logging_dict ) )
                    self.logger.log(logging_dict , commit=True )
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=state )
    def evaluate( self , state , dataset ):
        """simple docstring"""
        dataloader = get_batched_dataset(dataset , self.args.batch_size )
        total = len(dataset ) // self.args.batch_size
        running_loss = jnp.array(0 , dtype=jnp.float32 )
        i = 0
        for batch in tqdm(dataloader , total=total , desc="""Evaluating ... """ ):
            batch = self.data_collator(batch )
            metrics = self.val_step_fn(state , **batch )
            running_loss += jax_utils.unreplicate(metrics["""loss"""] )
            i += 1
        return running_loss / i
    def save_checkpoint( self , save_dir , state ):
        """simple docstring"""
        state = jax_utils.unreplicate(state )
        print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=""" ... """ )
        self.model_save_fn(save_dir , params=state.params )
        with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """wb""" ) as f:
            f.write(to_bytes(state.opt_state ) )
        joblib.dump(self.args , os.path.join(save_dir , """args.joblib""" ) )
        joblib.dump(self.data_collator , os.path.join(save_dir , """data_collator.joblib""" ) )
        with open(os.path.join(save_dir , """training_state.json""" ) , """w""" ) as f:
            json.dump({"""step""": state.step.item()} , f )
        print("""DONE""" )
def restore_checkpoint( save_dir , state ):
    print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=""" ... """ )
    with open(os.path.join(save_dir , """flax_model.msgpack""" ) , """rb""" ) as f:
        params = from_bytes(state.params , f.read() )
    with open(os.path.join(save_dir , """opt_state.msgpack""" ) , """rb""" ) as f:
        opt_state = from_bytes(state.opt_state , f.read() )
    args = joblib.load(os.path.join(save_dir , """args.joblib""" ) )
    data_collator = joblib.load(os.path.join(save_dir , """data_collator.joblib""" ) )
    with open(os.path.join(save_dir , """training_state.json""" ) , """r""" ) as f:
        training_state = json.load(f )
    step = training_state["""step"""]
    print("""DONE""" )
    return params, opt_state, step, args, data_collator
def scheduler_fn( lr , init_lr , warmup_steps , num_train_steps ):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr , end_value=lr , transition_steps=warmup_steps )
    decay_fn = optax.linear_schedule(init_value=lr , end_value=1e-7 , transition_steps=decay_steps )
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
    return lr
def build_tx( lr , init_lr , warmup_steps , num_train_steps , weight_decay ):
    def weight_decay_mask(params ):
        params = traverse_util.flatten_dict(params )
        mask = {k: (v[-1] != """bias""" and v[-2:] != ("""LayerNorm""", """scale""")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask )
    lr = scheduler_fn(lr , init_lr , warmup_steps , num_train_steps )
    tx = optax.adamw(learning_rate=lr , weight_decay=weight_decay , mask=weight_decay_mask )
    return tx, lr
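# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of wiring the pieces above together; `num_train_steps`,
# `pad_id` and the wandb handle are placeholders, and the real `main` wiring
# lives outside this file in the original project.
#
# args = Args()
# model = FlaxBigBirdForNaturalQuestions.from_pretrained(args.model_id)
# tx, lr = build_tx(lr=args.lr, init_lr=args.init_lr, warmup_steps=args.warmup_steps,
#                   num_train_steps=10_000, weight_decay=args.weight_decay)
# trainer = Trainer(args=args, data_collator=DataCollator(pad_id=0, max_length=4096),
#                   train_step_fn=train_step, val_step_fn=val_step,
#                   model_save_fn=model.save_pretrained, logger=wandb, scheduler_fn=lr)
# state = trainer.create_state(model, tx, num_train_steps=10_000)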
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
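# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of the `datasets` inspection helpers exercised by the
# tests above; the dataset name is just an example.
#
# from datasets import get_dataset_config_names, get_dataset_split_names
#
# configs = get_dataset_config_names("squad")                        # e.g. ["plain_text"]
# splits = get_dataset_split_names("squad", config_name=configs[0])  # ["train", "validation"]
# print(configs, splits)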
from torch import nn
def get_activation( act_fn: str ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
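# --- Illustrative usage (not part of the original file) ---
# A quick sanity check of the helper above; the input shape is arbitrary.
#
# import torch
#
# act = get_activation("silu")   # returns nn.SiLU()
# x = torch.randn(2, 4)
# assert act(x).shape == x.shape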
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images :Optional[ImageInput] = None , text :Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens :bool = True , padding :Union[bool, str, PaddingStrategy] = False , truncation :Union[bool, str, TruncationStrategy] = False , max_length :Optional[int] = None , stride :int = 0 , pad_to_multiple_of :Optional[int] = None , return_image_mask :Optional[bool] = None , return_codebook_pixels :Optional[bool] = None , return_token_type_ids :Optional[bool] = None , return_attention_mask :Optional[bool] = None , return_overflowing_tokens :bool = False , return_special_tokens_mask :bool = False , return_offsets_mapping :bool = False , return_length :bool = False , verbose :bool = True , return_tensors :Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ) -> List[str]:
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , FutureWarning , )
        return self.image_processor_class
@property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , FutureWarning , )
        return self.image_processor
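# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of running `FlavaProcessor` on paired text/image input;
# the checkpoint name and the blank image are placeholders.
#
# from PIL import Image
#
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# image = Image.new("RGB", (224, 224))
# inputs = processor(images=image, text=["a photo"], return_tensors="pt")
# print(sorted(inputs.keys()))  # input_ids, attention_mask, pixel_values, ...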
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
def get_maskformer_config( model_name: str ):
    backbone_config = SwinConfig.from_pretrained(
        """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
    config = MaskFormerConfig(backbone_config=backbone_config )
    repo_id = """huggingface/label-files"""
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = """maskformer-ade20k-full-id2label.json"""
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = """ade20k-id2label.json"""
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = """maskformer-coco-stuff-id2label.json"""
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = """coco-panoptic-id2label.json"""
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = """cityscapes-id2label.json"""
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = """mapillary-vistas-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    return config
def create_rename_keys( config ):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") )
rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") )
rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") )
rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") )
# heads on top
rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", """model.transformer_module.queries_embedder.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") )
rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") )
rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_swin_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' )
            in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'''] = in_proj_bias[: dim]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'''] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'''] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'''] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'''] = in_proj_bias[-dim :]
            # fmt: on
def read_in_decoder_q_k_v( state_dict , config ):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers ):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight'''] = in_proj_weight[: hidden_size, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias'''] = in_proj_bias[:config.hidden_size]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight'''] = in_proj_weight[-hidden_size :, :]
        state_dict[f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias'''] = in_proj_bias[-hidden_size :]
    # fmt: on
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_maskformer_checkpoint( model_name: str , checkpoint_path: str , pytorch_dump_folder_path: str , push_to_hub: bool = False ):
    config = get_maskformer_config(model_name )
    # load original state_dict
    with open(checkpoint_path , """rb""" ) as f:
        data = pickle.load(f )
    state_dict = data["""model"""]
    # for name, param in state_dict.items():
    #     print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_swin_q_k_v(state_dict , config.backbone_config )
    read_in_decoder_q_k_v(state_dict , config )
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value )
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config )
    model.eval()
    for name, param in model.named_parameters():
        print(name , param.shape )
    missing_keys , unexpected_keys = model.load_state_dict(state_dict , strict=False )
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys ) == 0, f'''Unexpected keys: {unexpected_keys}'''
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if """ade""" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index , reduce_labels=reduce_labels )
    inputs = image_processor(image , return_tensors="""pt""" )
    outputs = model(**inputs )
    print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] )
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_logits , atol=1e-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("""Pushing model and image processor to the hub...""" )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        image_processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
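# --- Illustrative invocation (not part of the original file) ---
# A sketch of how this conversion script is typically run from a shell; the
# script filename and checkpoint path are placeholders.
#
# python convert_maskformer_checkpoint.py \
#     --model_name maskformer-swin-tiny-ade \
#     --checkpoint_path /path/to/model.pkl \
#     --pytorch_dump_folder_path ./converted \
#     --push_to_hub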
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_lowerCamelCase = False
class UpperCamelCase_ ( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed( self ):
        """simple docstring"""
        return 12
    @property
    def num_embeds_ada_norm( self ):
        """simple docstring"""
        return 12
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def dummy_vqvae( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model
    @property
    def dummy_tokenizer( self ):
        """simple docstring"""
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )
    @property
    def dummy_transformer( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = Transformer2DModel(**model_kwargs )
        return model
def _snake_case ( self :List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.dummy_vqvae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_transformer
SCREAMING_SNAKE_CASE__ = VQDiffusionScheduler(self.num_embed )
SCREAMING_SNAKE_CASE__ = LearnedClassifierFreeSamplingEmbeddings(learnable=__A )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline(
vqvae=__A , text_encoder=__A , tokenizer=__A , transformer=__A , scheduler=__A , learned_classifier_free_sampling_embeddings=__A , )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = """teddy bear playing in the pool"""
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe([prompt] , generator=__A , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
[prompt] , generator=__A , output_type="""np""" , return_dict=__A , num_inference_steps=2 )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :List[Any] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu"""
SCREAMING_SNAKE_CASE__ = self.dummy_vqvae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ = self.dummy_transformer
SCREAMING_SNAKE_CASE__ = VQDiffusionScheduler(self.num_embed )
SCREAMING_SNAKE_CASE__ = LearnedClassifierFreeSamplingEmbeddings(
learnable=__A , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline(
vqvae=__A , text_encoder=__A , tokenizer=__A , transformer=__A , scheduler=__A , learned_classifier_free_sampling_embeddings=__A , )
SCREAMING_SNAKE_CASE__ = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
SCREAMING_SNAKE_CASE__ = """teddy bear playing in the pool"""
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe([prompt] , generator=__A , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
[prompt] , generator=__A , output_type="""np""" , return_dict=__A , num_inference_steps=2 )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _snake_case ( self :List[str] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
SCREAMING_SNAKE_CASE__ = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
SCREAMING_SNAKE_CASE__ = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
SCREAMING_SNAKE_CASE__ = torch.Generator(device=__A ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__A , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
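# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of running the pretrained pipeline exercised by the slow
# test above; it downloads the checkpoint and needs a CUDA device in practice.
#
# import torch
# from diffusers import VQDiffusionPipeline
#
# pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
# generator = torch.Generator(device="cuda").manual_seed(0)
# image = pipe("teddy bear playing in the pool", generator=generator).images[0]
# image.save("teddy_bear.png")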
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE0_00
SEP = 0XE0_01
BOS = 0XE0_02
MASK = 0XE0_03
RESERVED = 0XE0_04
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer ( PreTrainedTokenizer ):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token :str=chr(CLS ) , eos_token :str=chr(SEP ) , sep_token :str=chr(SEP ) , cls_token :str=chr(CLS ) , pad_token :str=chr(PAD ) , mask_token :str=chr(MASK ) , add_prefix_space :bool=False , model_max_length :int=2048 , **kwargs , ):
        """simple docstring"""
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
@property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return self._unicode_vocab_size
    def _tokenize( self , text :str ) -> List[str]:
        """simple docstring"""
        return list(text )
    def _convert_token_to_id( self , token :str ) -> int:
        """simple docstring"""
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f'''invalid token: \'{token}\'''' )
    def _convert_id_to_token( self , index :int ) -> str:
        """simple docstring"""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f'''invalid id: {index}''' )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None , already_has_special_tokens :bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 :List[int] , token_ids_1 :Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory :str , filename_prefix :Optional[str] = None ):
        """simple docstring"""
        return ()
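# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of the character-level tokenizer above: every character is
# mapped to its Unicode codepoint, bracketed by the [CLS]/[SEP] pseudo-characters.
#
# tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
# enc = tokenizer("hi")
# print(enc["input_ids"])  # [57344, 104, 105, 57345] == [CLS], 'h', 'i', [SEP]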
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
        """simple docstring"""
        size = size if size is not None else {"""height""": 20, """width""": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
    def prepare_image_processor_dict( self ):
        """simple docstring"""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self ):
        """simple docstring"""
        img_url = """https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"""
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert("""RGB""" )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
    def setUp( self ):
        """simple docstring"""
        self.image_processor_tester = Pix2StructImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
        self.assertTrue(hasattr(image_processor , """do_convert_rgb""" ) )
    def test_expected_patches( self ):
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2048
        inputs = image_processor(dummy_image , return_tensors="""pt""" , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0_6_0_6 ) , atol=1E-3 , rtol=1E-3 ) )
def test_call_pil( self ) -> int:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
encoded_images = image_processor(
image_inputs , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def test_call_vqa( self ) -> str:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
image_processor.is_vqa = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input: in VQA mode a header text must be supplied
with self.assertRaises(ValueError ):
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
header_text = """Hello"""
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch , header_text=header_text ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
encoded_images = image_processor(
image_inputs , return_tensors="""pt""" , max_patches=max_patch , header_text=header_text ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def test_call_numpy( self ) -> List[Any]:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
expected_hidden_dim = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
encoded_images = image_processor(
image_inputs , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def test_call_pytorch( self ) -> Optional[int]:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
expected_hidden_dim = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
encoded_images = image_processor(
image_inputs , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp( self ) -> Optional[int]:
"""simple docstring"""
self.image_processor_tester = Pix2StructImageProcessingTester(self , num_channels=4 )
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict( self ) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self ) -> Any:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , """do_normalize""" ) )
self.assertTrue(hasattr(image_processor , """do_convert_rgb""" ) )
def test_call_pil( self ) -> Dict:
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input (4-channel inputs are converted to RGB, hence num_channels - 1)
expected_hidden_dim = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
encoded_images = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
encoded_images = image_processor(
image_inputs , return_tensors="""pt""" , max_patches=max_patch ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 6 |
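# Minimal usage sketch outside the test harness (names mirror the calls above;
# the max_patches value is an example, not a requirement):
#
#   processor = Pix2StructImageProcessor()
#   patches = processor(image, return_tensors="pt", max_patches=2048).flattened_patches
#   # patches.shape == (1, 2048, 16 * 16 * 3 + 2) with the default 16x16 patch size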
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = 'bert-base-cased'
FP16 = 'fp16'
BF16 = 'bf16'
dtypes = [FP16, BF16]
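# FSDP_SHARDING_STRATEGY, FSDP_BACKWARD_PREFETCH, FSDP_STATE_DICT_TYPE and
# FSDP_AUTO_WRAP_POLICY (imported above) are accelerate's lists of option
# strings; the tests below rely on each string's 1-based position matching the
# value of the corresponding torch FSDP enum (e.g. "FULL_SHARD" -> ShardingStrategy(1)).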
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase ):
def setUp( self ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
self.dist_env = dict(
ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , )
def test_sharding_strategy( self ) -> Tuple:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
env = self.dist_env.copy()
env["""FSDP_SHARDING_STRATEGY"""] = f'''{i + 1}'''
env["""FSDP_SHARDING_STRATEGY_NAME"""] = strategy
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )
def test_backward_prefetch( self ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH ):
env = self.dist_env.copy()
env["""FSDP_BACKWARD_PREFETCH"""] = prefetch_policy
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )
def test_state_dict_type( self ) -> List[str]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE ):
env = self.dist_env.copy()
env["""FSDP_STATE_DICT_TYPE"""] = state_dict_type
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.rank0_only )
def test_auto_wrap_policy( self ) -> str:
"""simple docstring"""
model = AutoModel.from_pretrained(BERT_BASE_CASED )
for policy in FSDP_AUTO_WRAP_POLICY:
env = self.dist_env.copy()
env["""FSDP_AUTO_WRAP_POLICY"""] = policy
if policy == "TRANSFORMER_BASED_WRAP":
env["""FSDP_TRANSFORMER_CLS_TO_WRAP"""] = """BertLayer"""
elif policy == "SIZE_BASED_WRAP":
env["""FSDP_MIN_NUM_PARAMS"""] = """2000"""
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
env = self.dist_env.copy()
env["""FSDP_AUTO_WRAP_POLICY"""] = """TRANSFORMER_BASED_WRAP"""
env["""FSDP_TRANSFORMER_CLS_TO_WRAP"""] = """T5Layer"""
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
with self.assertRaises(Exception ) as cm:
fsdp_plugin.set_auto_wrap_policy(model )
self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) )
env = self.dist_env.copy()
env["""FSDP_AUTO_WRAP_POLICY"""] = """SIZE_BASED_WRAP"""
env["""FSDP_MIN_NUM_PARAMS"""] = """0"""
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(model )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def test_mixed_precision( self ) -> Optional[int]:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
env = self.dist_env.copy()
env["""ACCELERATE_MIXED_PRECISION"""] = mp_dtype
with mockenv_context(**env ):
accelerator = Accelerator()
if mp_dtype == "fp16":
dtype = torch.float16
elif mp_dtype == "bf16":
dtype = torch.bfloat16
mp_policy = MixedPrecision(param_dtype=dtype , reduce_dtype=dtype , buffer_dtype=dtype )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , mp_policy )
if mp_dtype == FP16:
self.assertTrue(isinstance(accelerator.scaler , ShardedGradScaler ) )
elif mp_dtype == BF16:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(True )
def test_cpu_offload( self ) -> str:
"""simple docstring"""
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
env = self.dist_env.copy()
env["""FSDP_OFFLOAD_PARAMS"""] = str(flag ).lower()
with mockenv_context(**env ):
fsdp_plugin = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=flag ) )
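# A minimal sketch of the pattern exercised above, runnable on a single GPU
# (env var names follow the tests; values are illustrative):
#
#   import os
#   os.environ["ACCELERATE_USE_FSDP"] = "true"
#   os.environ["FSDP_SHARDING_STRATEGY"] = "1"  # FULL_SHARD
#   plugin = FullyShardedDataParallelPlugin()  # reads its config from the env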
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase ):
def setUp( self ) -> Any:
"""simple docstring"""
super().setUp()
self.performance_lower_bound = 0.82
self.performance_configs = [
"""fsdp_shard_grad_op_transformer_based_wrap""",
"""fsdp_full_shard_transformer_based_wrap""",
]
self.peak_memory_usage_upper_bound = {
"""multi_gpu_fp16""": 3200,
"""fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000,
"""fsdp_full_shard_transformer_based_wrap_fp16""": 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
self.n_train = 160
self.n_val = 160
mod_file = inspect.getfile(accelerate.test_utils )
self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] )
def test_performance( self ) -> int:
"""simple docstring"""
self.test_file_path = os.path.join(self.test_scripts_folder , """test_performance.py""" )
cmd = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""]
for config in self.performance_configs:
cmd_config = cmd.copy()
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in config:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--performance_lower_bound={self.performance_lower_bound}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config , env=os.environ.copy() )
def test_checkpointing( self ) -> Union[str, Any]:
"""simple docstring"""
self.test_file_path = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" )
cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
"""--use_fsdp""",
"""--mixed_precision=fp16""",
"""--fsdp_transformer_layer_cls_to_wrap=BertLayer""",
]
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
cmd_config = cmd.copy()
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
if strategy != "FULL_SHARD":
continue
state_dict_config_index = len(cmd_config )
for state_dict_type in FSDP_STATE_DICT_TYPE:
cmd_config = cmd_config[:state_dict_config_index]
cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
"""--partial_train_epoch=1""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config , env=os.environ.copy() )
cmd_config = cmd_config[:-1]
resume_from_checkpoint = os.path.join(self.tmpdir , """epoch_0""" )
cmd_config.extend(
[
f'''--resume_from_checkpoint={resume_from_checkpoint}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config , env=os.environ.copy() )
def test_peak_memory_usage( self ) -> str:
"""simple docstring"""
self.test_file_path = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" )
cmd = [
"""accelerate""",
"""launch""",
"""--num_processes=2""",
"""--num_machines=1""",
"""--machine_rank=0""",
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
for i, strategy in enumerate(FSDP_SHARDING_STRATEGY ):
if strategy.lower() in spec:
cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f'''--output_dir={self.tmpdir}''',
f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
f'''--n_train={self.n_train}''',
f'''--n_val={self.n_val}''',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(cmd_config , env=os.environ.copy() ) | 6 | 1 |
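# For reference, one of the launches assembled above is roughly equivalent to
# the following shell command (flag names taken from the cmd lists; the script
# path and output dir are placeholders):
#
#   accelerate launch --num_processes=2 --use_fsdp --mixed_precision=fp16 \
#       --fsdp_sharding_strategy=1 --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer test_performance.py --output_dir=out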
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list( split_dict: SplitDict ):
split_dict_yaml_list = split_dict._to_yaml_list()
assert len(split_dict_yaml_list ) == len(split_dict )
reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
split_info.dataset_name = None
# the split name of split_dict takes over the name of the split info object
split_info.name = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=UpperCamelCase__ ), SplitInfo(dataset_name="""my_dataset""" )] )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
SCREAMING_SNAKE_CASE__ = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 6 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'PoolFormerConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'sail/poolformer_s12'
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'sail/poolformer_s12'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path( input: torch.Tensor , drop_prob: float = 0.0 , training: bool = False ):
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
output = input.div(keep_prob ) * random_tensor
return output
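# Example: with drop_prob=0.2 at training time, each sample's residual branch is
# zeroed with probability 0.2 and the survivors are scaled by 1/0.8 (the
# `div(keep_prob)` above), so the expected activation magnitude is unchanged.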
class PoolFormerDropPath(nn.Module ):
def __init__( self , drop_prob: Optional[float] = None ) -> None:
"""simple docstring"""
super().__init__()
self.drop_prob = drop_prob
def forward( self , hidden_states: torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
return drop_path(hidden_states , self.drop_prob , self.training )
def extra_repr( self ) -> str:
"""simple docstring"""
return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module ):
def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
def forward( self , pixel_values ) -> Optional[Any]:
"""simple docstring"""
embeddings = self.projection(pixel_values )
embeddings = self.norm(embeddings )
return embeddings
class PoolFormerGroupNorm(nn.GroupNorm ):
def __init__( self , num_channels , **kwargs ) -> Dict:
"""simple docstring"""
super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling(nn.Module ):
def __init__( self , pool_size ) -> Any:
"""simple docstring"""
super().__init__()
self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
def forward( self , hidden_states ) -> Optional[Any]:
"""simple docstring"""
return self.pool(hidden_states ) - hidden_states
class PoolFormerOutput(nn.Module ):
def __init__( self , config , dropout_prob , hidden_size , intermediate_size ) -> str:
"""simple docstring"""
super().__init__()
self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
self.drop = PoolFormerDropPath(dropout_prob )
if isinstance(config.hidden_act , str ):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
def forward( self , hidden_states ) -> Dict:
"""simple docstring"""
hidden_states = self.conv1(hidden_states )
hidden_states = self.act_fn(hidden_states )
hidden_states = self.drop(hidden_states )
hidden_states = self.conv2(hidden_states )
hidden_states = self.drop(hidden_states )
return hidden_states
class PoolFormerLayer(nn.Module ):
def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ) -> Optional[int]:
"""simple docstring"""
super().__init__()
self.pooling = PoolFormerPooling(pool_size )
self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
self.before_norm = PoolFormerGroupNorm(num_channels )
self.after_norm = PoolFormerGroupNorm(num_channels )
# Useful for training neural nets
self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
self.use_layer_scale = config.use_layer_scale
if config.use_layer_scale:
self.layer_scale_1 = nn.Parameter(
config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
self.layer_scale_2 = nn.Parameter(
config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
def forward( self , hidden_states ) -> str:
"""simple docstring"""
if self.use_layer_scale:
pooling_output = self.pooling(self.before_norm(hidden_states ) )
scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
hidden_states = hidden_states + self.drop_path(scaled_op )
outputs = ()
layer_output = self.output(self.after_norm(hidden_states ) )
scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
output = hidden_states + self.drop_path(scaled_op )
outputs = (output,) + outputs
return outputs
else:
pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
# First residual connection
hidden_states = pooling_output + hidden_states
outputs = ()
# Second residual connection inside the PoolFormerOutput block
layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
output = hidden_states + layer_output
outputs = (output,) + outputs
return outputs
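# Both branches compute the same two-step PoolFormer block:
#   x = x + DropPath(scale_1 * Pooling(Norm_1(x)))
#   x = x + DropPath(scale_2 * MLP(Norm_2(x)))
# with the per-channel scale factors present only when use_layer_scale is set.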
class PoolFormerEncoder(nn.Module ):
def __init__( self , config ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
self.config = config
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
embeddings = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
self.patch_embeddings = nn.ModuleList(embeddings )
# Transformer blocks
blocks = []
cur = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
layers = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(layers ) )
self.block = nn.ModuleList(blocks )
def forward( self , pixel_values , output_hidden_states=False , return_dict=True ) -> Optional[int]:
"""simple docstring"""
all_hidden_states = () if output_hidden_states else None
hidden_states = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
embedding_layer , block_layer = layers
# Get patch embeddings from hidden_states
hidden_states = embedding_layer(hidden_states )
# Send the embeddings through the blocks
for _, blk in enumerate(block_layer ):
layer_outputs = blk(hidden_states )
hidden_states = layer_outputs[0]
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel(PreTrainedModel ):
config_class = PoolFormerConfig
base_model_prefix = "poolformer"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
def _init_weights( self , module ) -> Dict:
"""simple docstring"""
if isinstance(module , (nn.Linear, nn.Conv2d) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _set_gradient_checkpointing( self , module , value=False ) -> Any:
"""simple docstring"""
if isinstance(module , PoolFormerEncoder ):
module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
POOLFORMER_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , POOLFORMER_START_DOCSTRING , )
class PoolFormerModel(PoolFormerPreTrainedModel ):
def __init__( self , config ) -> int:
"""simple docstring"""
super().__init__(config )
self.config = config
self.encoder = PoolFormerEncoder(config )
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings( self ) -> Union[str, Any]:
"""simple docstring"""
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def forward( self , pixel_values: Optional[torch.FloatTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
"""simple docstring"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
encoder_outputs = self.encoder(
pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module ):
def __init__( self , config ) -> Tuple:
"""simple docstring"""
super().__init__()
self.dense = nn.Linear(config.hidden_size , config.hidden_size )
def forward( self , hidden_states ) -> int:
"""simple docstring"""
output = self.dense(hidden_states )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel ):
def __init__( self , config ) -> int:
"""simple docstring"""
super().__init__(config )
self.num_labels = config.num_labels
self.poolformer = PoolFormerModel(config )
# Final norm
self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def forward( self , pixel_values: Optional[torch.FloatTensor] = None , labels: Optional[torch.LongTensor] = None , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.poolformer(
pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
sequence_output = outputs[0]
logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = """single_label_classification"""
else:
self.config.problem_type = """multi_label_classification"""
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze() , labels.squeeze() )
else:
loss = loss_fct(logits , labels )
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits , labels )
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states ) | 6 | 1 |
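# Minimal usage sketch (checkpoint name taken from the docstring constants
# above; image loading elided, and resolution via AutoImageProcessor is an
# assumption):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   logits = model(**processor(images=image, return_tensors="pt")).logits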
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject ):
_backends = ["torch", "scipy"]
def __init__( self , *args , **kwargs ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """scipy"""] )
@classmethod
def from_config( cls , *args , **kwargs ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """scipy"""] )
@classmethod
def from_pretrained( cls , *args , **kwargs ) -> Any:
"""simple docstring"""
requires_backends(cls , ["""torch""", """scipy"""] ) | 6 |
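# This dummy object defers the failure: importing it succeeds, but instantiating
# it or calling either classmethod raises an informative error from
# requires_backends until both torch and scipy are installed.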
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object ):
def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=12 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , summary_type="last" , use_proj=None , scope=None , ) -> str:
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_lengths = use_input_lengths
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.gelu_activation = gelu_activation
self.sinusoidal_embeddings = sinusoidal_embeddings
self.causal = causal
self.asm = asm
self.n_langs = n_langs
self.vocab_size = vocab_size
self.n_special = n_special
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.summary_type = summary_type
self.use_proj = use_proj
self.scope = scope
def prepare_config_and_inputs( self ) -> List[str]:
"""simple docstring"""
input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_mask = random_attention_mask([self.batch_size, self.seq_length] )
input_lengths = None
if self.use_input_lengths:
input_lengths = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
sequence_labels = None
token_labels = None
is_impossible_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
is_impossible_labels = ids_tensor([self.batch_size] , 2 ).float()
choice_labels = ids_tensor([self.batch_size] , self.num_choices )
config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
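# Note: `input_lengths` above is seq_length - 2 plus a random 0/1, so every
# declared length stays strictly inside the padded sequence and exercises
# Flaubert's length-based attention masking.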
def get_config( self ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def create_and_check_flaubert_model(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[Any]:
"""simple docstring"""
model = FlaubertModel(config=config )
model.to(torch_device )
model.eval()
result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
result = model(input_ids , langs=token_type_ids )
result = model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_flaubert_lm_head(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Dict:
"""simple docstring"""
model = FlaubertWithLMHeadModel(config )
model.to(torch_device )
model.eval()
result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_flaubert_simple_qa(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> int:
"""simple docstring"""
model = FlaubertForQuestionAnsweringSimple(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_flaubert_qa(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
"""simple docstring"""
model = FlaubertForQuestionAnswering(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result_with_labels = model(
input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
result_with_labels = model(
input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
((total_loss) , ) = result_with_labels.to_tuple()
result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
((total_loss) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def create_and_check_flaubert_sequence_classif(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
"""simple docstring"""
model = FlaubertForSequenceClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids )
result = model(input_ids , labels=sequence_labels )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def create_and_check_flaubert_token_classif(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
"""simple docstring"""
config.num_labels = self.num_labels
model = FlaubertForTokenClassification(config )
model.to(torch_device )
model.eval()
result = model(input_ids , attention_mask=input_mask , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_flaubert_multiple_choice(
self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
"""simple docstring"""
config.num_choices = self.num_choices
model = FlaubertForMultipleChoice(config=config )
model.to(torch_device )
model.eval()
multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
result = model(
multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self ) -> Any:
"""simple docstring"""
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip(
self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> str:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ) -> List[Any]:
"""simple docstring"""
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["""start_positions"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict["""end_positions"""] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self ) -> Optional[Any]:
"""simple docstring"""
self.model_tester = FlaubertModelTester(self )
self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def test_config( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def test_flaubert_model( self ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
def test_flaubert_lm_head( self ) -> List[str]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
def test_flaubert_simple_qa( self ) -> Optional[int]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
def test_flaubert_qa( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
def test_flaubert_sequence_classif( self ) -> Optional[Any]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
def test_flaubert_token_classif( self ) -> Optional[int]:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
def test_flaubert_multiple_choice( self ) -> Tuple:
"""simple docstring"""
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self ) -> List[str]:
"""simple docstring"""
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = FlaubertModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self ) -> List[str]:
"""simple docstring"""
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
config.torchscript = True
model = model_class(config=config )
inputs_dict = self._prepare_for_class(inputs_dict , model_class )
traced_model = torch.jit.trace(
model , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(traced_model , os.path.join(tmp , """traced_model.pt""" ) )
loaded = torch.jit.load(os.path.join(tmp , """traced_model.pt""" ) , map_location=torch_device )
loaded(inputs_dict["""input_ids"""].to(torch_device ) , inputs_dict["""attention_mask"""].to(torch_device ) )
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase ):
@slow
def test_inference_no_head_absolute_embedding( self ) -> Any:
"""simple docstring"""
model = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
output = model(input_ids )[0]
expected_shape = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , expected_shape )
expected_slice = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) ) | 6 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_falcon"""] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 6 |
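# The _LazyModule assignment above defers importing the heavy modeling code:
# only the import-structure dict exists at import time, and e.g. FalconModel is
# materialized on first attribute access.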
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters( model_a , model_b , did_step , iteration ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model( model , input , target , accelerator , do_backward=True ):
model.train()
output = model(input )
loss = F.mse_loss(output , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(loss )
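# Note on step_model: in the manual branch the raw loss is divided by
# gradient_accumulation_steps before .backward(), reproducing by hand the
# scaling that accelerator.backward() applies in the other branch.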
def get_training_setup( accelerator , sched=False ):
set_seed(42 )
model = RegressionModel()
ddp_model = deepcopy(model )
dset = RegressionDataset(length=80 )
dataloader = DataLoader(dset , batch_size=16 )
model.to(accelerator.device )
if sched:
opt = AdamW(params=model.parameters() , lr=1e-3 )
ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
# Make a copy of `model`
if sched:
ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
else:
ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def test_noop_sync( accelerator ):
# Test when on a single CPU or GPU that the context manager does nothing
model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
input , target = accelerator.gather((ddp_input, ddp_target) )
input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(model , input , target , accelerator )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(ddp_model ):
step_model(ddp_model , ddp_input , ddp_target , accelerator )
else:
# Sync grads
step_model(ddp_model , ddp_input , ddp_target , accelerator )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(model , ddp_model , True , iteration )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync( accelerator ):
# Test on distributed setup that context manager behaves properly
model , ddp_model , dataloader = get_training_setup(accelerator )
# Use a single batch
ddp_input , ddp_target = next(iter(dataloader ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
input , target = accelerator.gather((ddp_input, ddp_target) )
input , target = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(model , input , target , accelerator )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(ddp_model ):
step_model(ddp_model , ddp_input , ddp_target , accelerator )
else:
# Sync grads
step_model(ddp_model , ddp_input , ddp_target , accelerator )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: int=False , UpperCamelCase__: Union[str, Any]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targets for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the plain (non-DDP) model
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # Perform gradient accumulation under the wrapper
        with accelerator.accumulate(UpperCamelCase__ ):
            step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # The DDP model and the plain model should only be in sync every other batch, or on the final batch
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
SCREAMING_SNAKE_CASE__ = ddp_input[torch.randperm(len(UpperCamelCase__ ) )]
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Tuple=False , UpperCamelCase__: List[str]=False ):
SCREAMING_SNAKE_CASE__ = Accelerator(
split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_training_setup(UpperCamelCase__ , UpperCamelCase__ )
for iteration, batch in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = batch.values()
        # Gather the distributed inputs and targets for the base model
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.gather((ddp_input, ddp_target) )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground-truth step on the plain (non-DDP) model
model.train()
ddp_model.train()
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(UpperCamelCase__ ):
step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
SCREAMING_SNAKE_CASE__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ ))
if accelerator.num_processes > 1:
check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=80 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ = RegressionDataset(length=96 )
SCREAMING_SNAKE_CASE__ = DataLoader(UpperCamelCase__ , batch_size=16 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if iteration < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(UpperCamelCase__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ )
if batch_num < len(UpperCamelCase__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = Accelerator()
SCREAMING_SNAKE_CASE__ = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(UpperCamelCase__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(UpperCamelCase__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
    main()
| 6 | 1 |
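For reference, the `accelerator.accumulate` wrapper exercised by the tests above is used the same way in an ordinary training loop; a minimal sketch (the model, loss, and data below are illustrative stand-ins, not taken from the tests):
import torch
from accelerate import Accelerator
accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataloader = torch.utils.data.DataLoader(torch.randn(16, 4), batch_size=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
for batch in dataloader:
    with accelerator.accumulate(model):  # grads sync, and the optimizer actually steps, only every 2nd batch
        loss = model(batch).pow(2).mean()
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()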
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowerCamelCase = Lock()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[int] , UpperCamelCase__: List[str] ):
global process_lock
    # n compare-exchange phases (n = list length) guarantee a sorted list;
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm.
    # The 10 below is hard-coded to match the length of the demo list in main().
    for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE__ = min(UpperCamelCase__ , UpperCamelCase__ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(UpperCamelCase__ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE__ = max(UpperCamelCase__ , UpperCamelCase__ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
SCREAMING_SNAKE_CASE__ = Pipe()
SCREAMING_SNAKE_CASE__ = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
SCREAMING_SNAKE_CASE__ = temp_rs
SCREAMING_SNAKE_CASE__ = temp_rr
for i in range(1 , len(UpperCamelCase__ ) - 1 ):
SCREAMING_SNAKE_CASE__ = Pipe()
SCREAMING_SNAKE_CASE__ = Pipe()
process_array_.append(
Process(
target=UpperCamelCase__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
SCREAMING_SNAKE_CASE__ = temp_rs
SCREAMING_SNAKE_CASE__ = temp_rr
process_array_.append(
Process(
target=UpperCamelCase__ , args=(
len(UpperCamelCase__ ) - 1,
arr[len(UpperCamelCase__ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(UpperCamelCase__ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = odd_even_transposition(UpperCamelCase__ )
print("""Sorted List\n""" )
print(*UpperCamelCase__ )
if __name__ == "__main__":
    main()
| 6 |
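The pipe plumbing above can obscure the underlying algorithm, so here is a single-process rendering of odd-even transposition sort (a sketch of the same idea, not the parallel implementation):
def odd_even_transposition_single(arr):
    n = len(arr)
    for phase in range(n):  # n phases guarantee a sorted list
        # even phases compare pairs (0,1), (2,3), ...; odd phases compare (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
print(odd_even_transposition_single(list(range(10, 0, -1))))  # [1, 2, ..., 10]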
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = ["image_processor", "tokenizer"]
lowerCamelCase_ = "AutoImageProcessor"
lowerCamelCase_ = "AutoTokenizer"
def __init__( self :Optional[int] , __A :Optional[Any] , __A :Dict ) -> Dict:
"""simple docstring"""
super().__init__(__A , __A )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self :int , __A :str=None , __A :int=None , __A :Union[str, Any]=None , **__A :str ) -> Optional[Any]:
"""simple docstring"""
if text is None and images is None:
            raise ValueError("""You have to specify either text or images. Both cannot be None.""" )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
SCREAMING_SNAKE_CASE__ = self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _snake_case ( self :str , *__A :List[str] , **__A :List[str] ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self :List[str] , *__A :Any , **__A :Any ) -> Tuple:
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
        return ["input_ids", "attention_mask", "pixel_values"]
| 6 | 1 |
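In practice a combined processor of this shape is loaded and called in one shot; a hedged sketch (assuming a checkpoint such as microsoft/git-base, which pairs an AutoImageProcessor with an AutoTokenizer — the prompt text and image below are illustrative):
from PIL import Image
from transformers import AutoProcessor
processor = AutoProcessor.from_pretrained("microsoft/git-base")
image = Image.new("RGB", (224, 224))
inputs = processor(text="a photo of a cat", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']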
import warnings
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
'`from accelerate import find_executable_batch_size` to avoid this warning.',
FutureWarning,
)
| 6 |
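The replacement the warning points to is a decorator that retries its function with a halved batch size whenever an out-of-memory error is raised; a minimal sketch of its use (the training body is a placeholder):
from accelerate import find_executable_batch_size
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    # ... build the dataloader and run the training loop at this batch size ...
train()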
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
SCREAMING_SNAKE_CASE__ = True
for i in range(1 , s + 1 ):
SCREAMING_SNAKE_CASE__ = False
for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            # dp[i][j]: can some subset of the first i elements sum to exactly j?
            SCREAMING_SNAKE_CASE__ = dp[i - 1][j]  # reachable without arr[i - 1]
            if arr[i - 1] <= j:
                SCREAMING_SNAKE_CASE__ = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # ...or by including it
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
SCREAMING_SNAKE_CASE__ = s - 2 * j
break
    return diff
| 6 | 1 |
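As a worked example of the recurrence above: for [1, 6, 11, 5] the total is 23, the largest reachable subset sum not exceeding 23 // 2 = 11 is 11 (e.g. {6, 5}), so the minimum partition difference is 23 - 2 * 11 = 1, splitting the list into {1, 6, 5} versus {11}. Assuming the anonymized function above were bound to a name such as find_min (a hypothetical name):
assert find_min([1, 6, 11, 5]) == 1  # find_min is a hypothetical binding for the function above
assert find_min([3, 1, 4, 2, 2, 1]) == 1  # 13 splits best as {3, 4} = 7 versus {1, 2, 2, 1} = 6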
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCamelCase = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    _lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 6 |
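Stripped of the transformers specifics, the lazy-module pattern above defers importing a submodule until one of its names is first accessed; the core idea can be sketched with a module-level __getattr__ (PEP 562) — a simplified illustration, not the actual _LazyModule implementation, with mypackage/submodule as hypothetical names:
# mypackage/__init__.py
import importlib
_import_structure = {"submodule": ["HeavyClass"]}
def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")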
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: float , UpperCamelCase__: float ):
if mass < 0:
raise ValueError("""The mass of a body cannot be negative""" )
return 0.5 * mass * abs(UpperCamelCase__ ) * abs(UpperCamelCase__ )
if __name__ == "__main__":
import doctest
    doctest.testmod(verbose=True)
| 6 | 1 |
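As a quick numeric check of the formula KE = 0.5 * m * v**2 computed above: a 10 kg mass at 10 m/s carries 0.5 * 10 * 10 * 10 = 500 J, and a velocity of -10 m/s gives the same result, since taking the absolute value before squaring removes the sign.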