# ONNX utilities: lazily-imported submodules.
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ViT-MSN: lazily-imported submodules; modeling classes require torch.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
UpperCamelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCamelCase_ = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
self.assertListEqual(
_SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because it should only be run when releasing a minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"

        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])

        # get train time from SageMaker job; this includes starting, preprocessing, and stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump test results into a json file to share in the PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# As the first dimension has 2 options and the second has 3 in our example, this will run the trainer 6
# times, adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
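#
# For illustration, that expansion is just itertools.product over the dimensions; a minimal
# sketch of what happens internally (variable names here are illustrative, not from this script):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["--fp16 0", "--fp16 1", "--bf16 1"]]
#   variations = [" ".join(combo) for combo in itertools.product(*dims)]
#   # -> ["--tf32 0 --fp16 0", "--tf32 0 --fp16 1", ..., "--tf32 1 --bf16 1"]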
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
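#
# For reference, all_results.json is the metrics file the HF Trainer writes at the end of a
# run; an illustrative (not exhaustive, values made up) snippet of its contents:
#
#   {"train_runtime": 45.21, "train_samples_per_second": 342.09, "train_loss": 2.51}
#
# any key present in that file can be passed to --target-metric-key or --report-metric-keys.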
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely, wrapped to `max_width` chars.
    """
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether or not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set whole word masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from"
            " another script, save it, and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        # Our input block size will be the max possible for the model
        data_args.block_size = tokenizer.max_len
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)
    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )
    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
lowerCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
lowerCamelCase = [0, 0]
# all coordinates are given in format [y,x]
lowerCamelCase = [len(grid) - 1, len(grid[0]) - 1]
lowerCamelCase = 1
# the cost map which pushes the path closer to the goal
lowerCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
lowerCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
lowerCamelCase = 99
lowerCamelCase , lowerCamelCase = search(grid, init, goal, cost, heuristic)
print("""ACTION MAP""")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """
    Mobius function mu(n): returns 1 if n is square-free with an even number of
    prime factors, -1 if square-free with an odd number, and 0 otherwise.
    """
    factors = prime_factors(number)
    if is_square_free(number):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def ugly_numbers(n: int) -> int:
    """
    Return the n-th ugly number, i.e. a positive integer whose only prime
    factors are 2, 3 or 5, using three merge pointers over the sequence.
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
def sylvester(number: int) -> int:
    """
    Return the number-th term of Sylvester's sequence, which follows the
    recurrence s(n) = s(n-1)^2 - s(n-1) + 1 with s(1) = 2.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1


if __name__ == "__main__":
    print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
# Distributed PyTorch retriever for RAG.
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist

from transformers import RagRetriever

logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the ``torch.distributed`` communication package. Only the main
    worker loads the index into (CPU) memory; the other workers gather/scatter retrieval results through it.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        """
        Retrieves documents for the given ``question_hidden_states``: the main process gathers queries from
        all workers, runs the retrieval against the index, and scatters the results back.
        """
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            doc_ids, retrieved_doc_embeds = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            doc_ids, retrieved_doc_embeds = torch.tensor(doc_ids), torch.tensor(retrieved_doc_embeds)
            scatter_ids = self._chunk_tensor(doc_ids, n_queries)
            scatter_vectors = self._chunk_tensor(retrieved_doc_embeds, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
# Blenderbot Small: lazily-imported submodules gated on optional dependencies.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
__A = [
"""openmmlab/upernet-convnext-tiny""",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
__A = """UperNetConfig"""
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers: a 2D convolution followed by
    batch normalization and a ReLU activation.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM): pools the input at several scales, projects each pooled map with a
    1x1 conv block, and upsamples the results back to the input resolution.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Decode head of Unified Perceptual Parsing (UPerNet): combines a PSP module on the top backbone
    feature with an FPN over the lower ones, then classifies the fused feature map.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output
class _lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = 2 , __UpperCAmelCase = 3 , __UpperCAmelCase = 1 ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :int = config
lowerCAmelCase__ :Tuple = config.auxiliary_in_channels
lowerCAmelCase__ :List[str] = config.auxiliary_channels
lowerCAmelCase__ :Optional[Any] = config.auxiliary_num_convs
lowerCAmelCase__ :Dict = config.auxiliary_concat_input
lowerCAmelCase__ :Union[str, Any] = in_index
lowerCAmelCase__ :str = (kernel_size // 2) * dilation
lowerCAmelCase__ :Dict = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__UpperCAmelCase , padding=__UpperCAmelCase , dilation=__UpperCAmelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__UpperCAmelCase , padding=__UpperCAmelCase , dilation=__UpperCAmelCase ) )
if self.num_convs == 0:
lowerCAmelCase__ :str = nn.Identity()
else:
lowerCAmelCase__ :Tuple = nn.Sequential(*__UpperCAmelCase )
if self.concat_input:
lowerCAmelCase__ :Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__UpperCAmelCase , padding=kernel_size // 2 )
lowerCAmelCase__ :Dict = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
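# Hedged usage sketch (added; not part of the original module). It follows the
# public transformers API; the checkpoint name is one published example and an
# assumption here, and `image` stands for any PIL image you load yourself.
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits  # shape (batch, num_labels, height, width)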
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_bridgetower': [
'BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BridgeTowerConfig',
'BridgeTowerTextConfig',
'BridgeTowerVisionConfig',
],
'processing_bridgetower': ['BridgeTowerProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST',
'BridgeTowerForContrastiveLearning',
'BridgeTowerForImageAndTextRetrieval',
'BridgeTowerForMaskedLM',
'BridgeTowerModel',
'BridgeTowerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post renaming of basic JAX keys to PyTorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block

    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2
def triangle(vertex1, vertex2, vertex3, depth):
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
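# Worked example (added for clarity; not part of the original script): the
# midpoint helper is what drives the self-similar subdivision above.
#
#   >>> get_mid((0, 0), (4, 2))
#   (2.0, 1.0)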
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
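# Quick check (added; illustrative only): the helper functions above operate
# directly on numpy arrays, independent of the datasets.Metric wrapper.
if __name__ == "__main__":
    import numpy as np

    preds, labels = np.array([0, 1, 1]), np.array([0, 1, 0])
    print(simple_accuracy(preds, labels))  # 0.666...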
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """
    Return the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
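# Small checks (added; illustrative): is_prime follows the 6k±1 trial division above.
#
#   >>> is_prime(29)
#   True
#   >>> is_prime(1)
#   False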
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701

a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
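# Worked example (added; illustrative): 2**10 = 1024, so the result mod 1000 is 24.
#
#   >>> binary_exponentiation(2, 10, 1000)
#   24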
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected, unweighted graph for running a Markov chain.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""
def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
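# Illustrative usage sketch (added; not part of the original file): a two-state
# chain where "a" stays put with probability 0.9.
if __name__ == "__main__":
    example = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example, 1000))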
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """Extract the user profile dict embedded in an Instagram page script tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to interact with an Instagram user's public profile page.
    """

    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and extract the embedded user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Fetch a live profile and check a few invariants."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120_000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the jieba segmentation tool; used by CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>",
                 eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>",
                 mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
"""simple docstring"""
from __future__ import annotations
from typing import Any
class Graph:
    """Graph data structure for Boruvka's minimum spanning tree algorithm."""

    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight]."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component to which a given node belongs."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """Propagates the component of a node to every other node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the smaller of two components into the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """Performs Boruvka's algorithm to find the MST of the graph."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
    doctest.testmod()
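# Illustrative usage sketch (added; not part of the original file): a 4-node
# graph whose minimum spanning tree has total weight 19.
if __name__ == "__main__":
    g = Graph(4)
    for u, v, w in [(0, 1, 10), (0, 2, 6), (0, 3, 5), (1, 3, 15), (2, 3, 4)]:
        g.add_edge(u, v, w)
    g.boruvka()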
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""
    res = a == b

    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from an ONNX model to reduce its size.
    Writes the optimized copy next to the input file and returns its path.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1_024 / 1_024 / 1_024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
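# Hedged usage sketch (added; not part of the original file). Assumes an
# exported "model.onnx" exists on disk; the deduplicated copy is written
# next to it as "optimized_model.onnx".
#
#   new_path = remove_dup_initializers("model.onnx")
#   print("wrote", new_path)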
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
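# Hedged usage sketch (added; not part of the test file): outside of tests, the
# scheduler is swapped into a diffusers pipeline via the standard from_config pattern.
#
#   from diffusers import DiffusionPipeline, UniPCMultistepScheduler
#
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)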
"""simple docstring"""
from typing import List
import numpy as np
def lowerCamelCase__ ( _lowerCamelCase : str ) -> List[str]:
lowerCamelCase_ = {key: len(A_ ) for key, value in gen_kwargs.items() if isinstance(A_ , A_ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
'Sharding is ambiguous for this dataset: '
+ 'we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'
+ '\n'.join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
+ '\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '
+ 'and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'
) )
lowerCamelCase_ = max(lists_lengths.values() , default=0 )
return max(1 , A_ )
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the range of shard indices per job."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into `max_num_jobs` gen_kwargs."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size get the same shuffling."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
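# Illustrative doctest-style checks (added; not part of the original module):
#
#   >>> _distribute_shards(num_shards=5, max_num_jobs=2)
#   [range(0, 3), range(3, 5)]
#   >>> _split_gen_kwargs({"files": ["a.txt", "b.txt"], "split": "train"}, max_num_jobs=2)
#   [{'files': ['a.txt'], 'split': 'train'}, {'files': ['b.txt'], 'split': 'train'}]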
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
while a != 0:
lowerCamelCase_ , lowerCamelCase_ = b % a, a
return b
def lowerCamelCase__ ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
if gcd(_lowerCamelCase , _lowerCamelCase ) != 1:
lowerCamelCase_ = F'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowerCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1, 0, a
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 0, 1, m
while va != 0:
lowerCamelCase_ = ua // va
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
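# Worked example (added; illustrative): 3 * 5 = 15 ≡ 1 (mod 7), so the inverse of 3 mod 7 is 5.
#
#   >>> find_mod_inverse(3, 7)
#   5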
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"

        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".

        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
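# Illustrative usage (a sketch, not part of the original file): replace os.path.join
# as seen by some already-imported module `mod`, either as a context manager or with
# the start()/stop() pair; `mock_join` is a hypothetical replacement callable.
#
#     with patch_submodule(mod, "os.path.join", mock_join):
#         mod.do_something()  # sees mock_join instead of os.path.join
#
#     p = patch_submodule(mod, "os.path.join", mock_join)
#     p.start()
#     ...
#     p.stop()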
| 239 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
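# Note: _LazyModule defers the heavy framework imports declared above; a line like
# `from transformers.models.xlm_roberta import XLMRobertaModel` only resolves through
# _import_structure and pulls in the torch-backed module on first attribute access,
# keeping the top-level `import transformers` cheap.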
| 548 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)
    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
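    # Note on the two jit tests above: jax dispatches computations asynchronously, so
    # block_until_ready() forces the jitted forward pass to actually execute (and fail
    # loudly) inside the test instead of at some later synchronization point.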
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_model_file_missing_error(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")
    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 548 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # build a default scheduler only when the caller did not pass one in
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
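    # The helper above is the canonical diffusers denoising loop: set_timesteps()
    # fixes the discretization, then each step() consumes the model's predicted
    # residual (noise) and returns prev_sample, the sample advanced one step
    # toward t = 0.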
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16

    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
| 16 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json',
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
| 16 | 1 |
_lowerCamelCase = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
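# Illustrative sketch (not part of this file): find_executable_batch_size, exported
# above, retries a decorated function with a halved batch size whenever it raises a
# CUDA out-of-memory error.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders from batch_size and run the training loop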
| 710 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)"""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether model parameters received gradients after the backward pass
    def on_after_backward(self, trainer, pl_module):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
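# Typical wiring (illustrative sketch using the names defined in this file; the
# MyTaskModule subclass is hypothetical):
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args)  # a BaseTransformer subclass implementing get_dataloader()
#     trainer = generic_train(model, args)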
| 447 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
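# Launch note (not part of this file): accelerate examples like this one are typically
# run either directly (`python nlp_example.py`) for single-process training or with
# `accelerate launch nlp_example.py` after configuring distributed settings via
# `accelerate config`.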
| 294 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 269 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
A: Union[str, Any] = tuple[int, int]
class SCREAMING_SNAKE_CASE__ :
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new (undirected) edge with the given weight to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return the minimum spanning tree as a new Graph."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # start above the largest weight so any crossing edge beats it
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
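# Illustrative check (toy data, not from the Project Euler network file): for
# vertices {0, 1, 2} with edges {(0, 1): 1, (1, 2): 2, (0, 2): 3},
# prims_algorithm() keeps (0, 1) and (1, 2), a spanning tree of total weight 3.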
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file_path: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file_path) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 716 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
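# Worked check (computed by hand from the formulas above): for year = 2023,
# days_to_add = 15 and days_from_phm_to_sunday = 3, so the result is
# March 22 + 18 days = April 9, 2023 -- the actual Easter Sunday that year.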
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
| 359 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 35 |
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Return True if there is a path from source `s` to sink `t` in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]
def mincut(graph, source, sink):
    """Compute max flow and report the edges this script treats as the minimum cut."""
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
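# Note: the augmenting loop above is Edmonds-Karp -- Ford-Fulkerson with BFS-chosen
# augmenting paths -- and the final scan reports edges whose capacity was fully
# saturated (original capacity > 0, residual capacity 0), which this script reports
# as the cut edges.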
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 642 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__UpperCAmelCase : int = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
def snake_case_ ( self : str , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ) -> np.ndarray:
_a : Dict = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_a : List[Any] = get_resize_output_image_size(__snake_case , size=size['''shortest_edge'''] , default_to_square=__snake_case )
return resize(__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
def snake_case_ ( self : Dict , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ) -> np.ndarray:
_a : List[Any] = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def snake_case_ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : int , ) -> Any:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def snake_case_ ( self : str , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Any , ) -> np.ndarray:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def snake_case_ ( self : List[str] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : int = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : bool = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[ChannelDimension] = ChannelDimension.FIRST , **__snake_case : Optional[int] , ) -> PIL.Image.Image:
_a : int = do_resize if do_resize is not None else self.do_resize
_a : Union[str, Any] = size if size is not None else self.size
_a : str = get_size_dict(__snake_case , param_name='''size''' , default_to_square=__snake_case )
_a : str = resample if resample is not None else self.resample
_a : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_a : List[str] = crop_size if crop_size is not None else self.crop_size
_a : str = get_size_dict(__snake_case , param_name='''crop_size''' , default_to_square=__snake_case )
_a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
_a : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
_a : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
_a : Optional[int] = image_mean if image_mean is not None else self.image_mean
_a : Union[str, Any] = image_std if image_std is not None else self.image_std
_a : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_a : Optional[Any] = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_a : List[str] = [convert_to_rgb(__snake_case ) for image in images]
# All transformations expect numpy arrays.
_a : Dict = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
_a : Any = [self.resize(image=__snake_case , size=__snake_case , resample=__snake_case ) for image in images]
if do_center_crop:
_a : Dict = [self.center_crop(image=__snake_case , size=__snake_case ) for image in images]
if do_rescale:
_a : Optional[int] = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
_a : List[Any] = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
_a : int = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
_a : int = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
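# A minimal usage sketch of the processor above. The input image is synthetic;
# with the defaults (224 shortest edge, 224x224 center crop), every image comes
# out as a 3x224x224 float array.
def _clip_image_processor_demo() -> None:
    from PIL import Image

    processor = CLIPImageProcessor()
    image = Image.fromarray(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)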
| 249 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise max(0, x)
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
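    # The subgradient of ReLU is 1 for positive inputs and 0 elsewhere; the
    # same numpy broadcasting idea gives it in one line.
    print((np.array([-1, 0, 5]) > 0).astype(float))  # --> [0. 0. 1.]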
| 249 | 1 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''') | 458 |
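# A slightly larger sketch in the same pre-1.0 qiskit API used above: prepare a
# Bell pair and measure both qubits. Roughly half the shots should land on
# '00' and half on '11'.
import qiskit


def bell_state_measure(shots: int = 1000):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)  # put qubit 0 into superposition
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=shots)
    return job.result().get_counts(circuit)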
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """simple docstring"""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
print(sort) | 458 | 1 |
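# An iterative alternative: Kahn's algorithm. Note the orientation differs
# from the recursive version above, which appends each vertex after its
# neighbors (children first); Kahn emits zero-indegree sources first.
from collections import deque


def kahn_topological_sort(graph: dict) -> list:
    indegree = {v: 0 for v in graph}
    for targets in graph.values():
        for t in targets:
            indegree[t] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for t in graph[v]:
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return order


print(kahn_topological_sort({"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}))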
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
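# Usage sketch for the processor above. The checkpoint name is the public
# OWL-ViT base checkpoint and the image is synthetic; with the default
# return_tensors="np", both outputs are numpy arrays.
def _owlvit_processor_demo() -> None:
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image)
    print(inputs["input_ids"].shape, inputs["pixel_values"].shape)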
| 719 |
'''simple docstring'''
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    # A number is automorphic when its square ends in the number itself,
    # so compare the two digit by digit from the right.
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
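    # The automorphic numbers below 1000 are 0, 1, 5, 6, 25, 76, 376 and 625.
    print([n for n in range(1000) if is_automorphic_number(n)])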
| 551 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="[SEP]" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="[CLS]" , __lowerCAmelCase="[MASK]" , __lowerCAmelCase = None , **__lowerCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCamelCase__ = (
AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase , normalized=__lowerCAmelCase )
if isinstance(__lowerCAmelCase , __lowerCAmelCase )
else mask_token
)
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCAmelCase , remove_space=__lowerCAmelCase , keep_accents=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCAmelCase , )
UpperCamelCase__ = do_lower_case
UpperCamelCase__ = remove_space
UpperCamelCase__ = keep_accents
UpperCamelCase__ = vocab_file
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
return len(self.sp_model )
def _lowerCamelCase ( self ):
UpperCamelCase__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self , __lowerCAmelCase ):
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self , __lowerCAmelCase ):
if self.remove_space:
UpperCamelCase__ = """ """.join(inputs.strip().split() )
else:
UpperCamelCase__ = inputs
UpperCamelCase__ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
UpperCamelCase__ = unicodedata.normalize("""NFKD""" , __lowerCAmelCase )
UpperCamelCase__ = """""".join([c for c in outputs if not unicodedata.combining(__lowerCAmelCase )] )
if self.do_lower_case:
UpperCamelCase__ = outputs.lower()
return outputs
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = self.preprocess_text(__lowerCAmelCase )
UpperCamelCase__ = self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase )
UpperCamelCase__ = []
for piece in pieces:
if len(__lowerCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
UpperCamelCase__ = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
UpperCamelCase__ = cur_pieces[1:]
else:
UpperCamelCase__ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCAmelCase )
else:
new_pieces.append(__lowerCAmelCase )
return new_pieces
def _lowerCamelCase ( self , __lowerCAmelCase ):
return self.sp_model.PieceToId(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
return self.sp_model.IdToPiece(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = []
UpperCamelCase__ = """"""
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCAmelCase ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(__lowerCAmelCase )
UpperCamelCase__ = False
out_string += self.sp_model.decode(__lowerCAmelCase )
return out_string.strip()
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1]
return [1] + ([0] * len(__lowerCAmelCase )) + [1]
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase__ = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCAmelCase , """wb""" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__lowerCAmelCase )
return (out_vocab_file,)
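# A minimal SentencePiece round trip showing the engine the tokenizer above
# wraps. The model path is illustrative and the pieces shown are examples;
# actual output depends on the trained model.
def _sentencepiece_demo(model_path: str = "spiece.model") -> None:
    import sentencepiece as spm

    sp = spm.SentencePieceProcessor()
    sp.Load(model_path)
    pieces = sp.encode("Hello world", out_type=str)
    print(pieces)  # e.g. ['▁Hello', '▁world']
    print(sp.decode(pieces))  # back to 'Hello world'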
| 619 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 619 | 1 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
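    # Worked examples on 0b1010 (decimal 10):
    print(set_bit(0b1010, 0))     # 11 (0b1011)
    print(clear_bit(0b1010, 1))   # 8  (0b1000)
    print(flip_bit(0b1010, 3))    # 2  (0b0010)
    print(is_bit_set(0b1010, 1))  # True
    print(get_bit(0b1010, 2))     # 0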
| 612 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    '''simple docstring'''
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_ckp(ckp_url):
    '''simple docstring'''
    r = OrderedDict()
    with open(ckp_url, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # torch.Tensor is the tensor type; torch.tensor is a factory function
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class lowercase_ :
A__ : Tuple = {}
def __init__( self , a_ , a_ = "root" , a_=0 ) ->Optional[Any]:
'''simple docstring'''
_a = name
_a = level
_a = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
_a = copy.deepcopy(a_ )
_a = copy.deepcopy(a_ )
if isinstance(a_ , a_ ):
_a = Config(a_ , name=a_ , level=level + 1 )
_a = v
setattr(self , a_ , a_ )
_a = d
def __repr__( self ) ->Optional[int]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , a_ , a_ ) ->str:
'''simple docstring'''
_a = val
_a = val
_a = key.split("." )
_a = len(a_ ) - 1
_a = self._pointer
if len(a_ ) > 1:
for i, l in enumerate(a_ ):
if hasattr(self , a_ ) and isinstance(getattr(self , a_ ) , a_ ):
setattr(getattr(self , a_ ) , ".".join(levels[i:] ) , a_ )
if l == last_level:
_a = val
else:
_a = pointer[l]
def lowerCamelCase__ ( self ) ->int:
'''simple docstring'''
return self._pointer
def lowerCamelCase__ ( self , a_ , a_ ) ->Any:
'''simple docstring'''
with open(f'''{file_name}''' , "w" ) as stream:
dump(a_ , a_ )
def lowerCamelCase__ ( self , a_ , a_ ) ->int:
'''simple docstring'''
with open(f'''{file_name}''' , "w" ) as stream:
json.dump(a_ , a_ )
@staticmethod
def lowerCamelCase__ ( a_ ) ->Union[str, Any]:
'''simple docstring'''
with open(a_ ) as stream:
_a = load(a_ , Loader=a_ )
return data
def __str__( self ) ->List[Any]:
'''simple docstring'''
_a = " "
if self._name != "root":
_a = f'''{t * (self._level-1)}{self._name}:\n'''
else:
_a = ""
_a = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(a_ , a_ ):
r += f'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += f'''{t * (self._level)}{k}: {v} ({type(a_ ).__name__})\n'''
_a = level
return r[:-1]
@classmethod
def lowerCamelCase__ ( cls , a_ , **a_ ) ->Tuple:
'''simple docstring'''
_a , _a = cls.get_config_dict(a_ , **a_ )
return cls(a_ )
@classmethod
def lowerCamelCase__ ( cls , a_ , **a_ ) ->List[Any]:
'''simple docstring'''
_a = kwargs.pop("cache_dir" , a_ )
_a = kwargs.pop("force_download" , a_ )
_a = kwargs.pop("resume_download" , a_ )
_a = kwargs.pop("proxies" , a_ )
_a = kwargs.pop("local_files_only" , a_ )
if os.path.isdir(a_ ):
_a = os.path.join(a_ , a_ )
elif os.path.isfile(a_ ) or is_remote_url(a_ ):
_a = pretrained_model_name_or_path
else:
_a = hf_bucket_url(a_ , filename=a_ , use_cdn=a_ )
try:
# Load from URL or cache if already cached
_a = cached_path(
a_ , cache_dir=a_ , force_download=a_ , proxies=a_ , resume_download=a_ , local_files_only=a_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_a = Config.load_yaml(a_ )
except EnvironmentError:
_a = "Can't load config for"
raise EnvironmentError(a_ )
if resolved_config_file == config_file:
print("loading configuration file from path" )
else:
print("loading configuration file cache" )
return Config.load_yaml(a_ ), kwargs
def compare(in_tensor):
    '''simple docstring'''
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False] ) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    '''simple docstring'''
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id, filename, use_cdn=True):
    '''simple docstring'''
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def lowerCAmelCase ( UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Tuple=0 , UpperCamelCase_: Union[str, Any]=None , ) -> List[str]:
'''simple docstring'''
_a = "python/{}".format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + "; ".join("{}/{}".format(UpperCamelCase_ , UpperCamelCase_ ) for k, v in user_agent.items() )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
ua += "; " + user_agent
_a = {"user-agent": ua}
if resume_size > 0:
_a = "bytes=%d-" % (resume_size,)
_a = requests.get(UpperCamelCase_ , stream=UpperCamelCase_ , proxies=UpperCamelCase_ , headers=UpperCamelCase_ )
if response.status_code == 416: # Range not satisfiable
return
_a = response.headers.get("Content-Length" )
_a = resume_size + int(UpperCamelCase_ ) if content_length is not None else None
_a = tqdm(
unit="B" , unit_scale=UpperCamelCase_ , total=UpperCamelCase_ , initial=UpperCamelCase_ , desc="Downloading" , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(UpperCamelCase_ ) )
temp_file.write(UpperCamelCase_ )
progress.close()
def lowerCAmelCase ( UpperCamelCase_: Optional[int] , UpperCamelCase_: str=None , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: int=10 , UpperCamelCase_: List[str]=False , UpperCamelCase_: int=None , UpperCamelCase_: List[str]=False , ) -> int:
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
_a = None
if not local_files_only:
try:
_a = requests.head(UpperCamelCase_ , allow_redirects=UpperCamelCase_ , proxies=UpperCamelCase_ , timeout=UpperCamelCase_ )
if response.status_code == 200:
_a = response.headers.get("ETag" )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
_a = url_to_filename(UpperCamelCase_ , UpperCamelCase_ )
# get cache path to put the file
_a = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(UpperCamelCase_ ):
return cache_path
else:
_a = [
file
for file in fnmatch.filter(os.listdir(UpperCamelCase_ ) , filename + ".*" )
if not file.endswith(".json" ) and not file.endswith(".lock" )
]
if len(UpperCamelCase_ ) > 0:
return os.path.join(UpperCamelCase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False." )
return None
# From now on, etag is not None.
if os.path.exists(UpperCamelCase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
_a = cache_path + ".lock"
with FileLock(UpperCamelCase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(UpperCamelCase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
_a = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(UpperCamelCase_ , "a+b" ) as f:
yield f
_a = _resumable_file_manager
if os.path.exists(UpperCamelCase_ ):
_a = os.stat(UpperCamelCase_ ).st_size
else:
_a = 0
else:
_a = partial(tempfile.NamedTemporaryFile , dir=UpperCamelCase_ , delete=UpperCamelCase_ )
_a = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"%s not found in cache or force_download set to True, downloading to %s" , UpperCamelCase_ , temp_file.name , )
http_get(
UpperCamelCase_ , UpperCamelCase_ , proxies=UpperCamelCase_ , resume_size=UpperCamelCase_ , user_agent=UpperCamelCase_ , )
os.replace(temp_file.name , UpperCamelCase_ )
_a = {"url": url, "etag": etag}
_a = cache_path + ".json"
with open(UpperCamelCase_ , "w" ) as meta_file:
json.dump(UpperCamelCase_ , UpperCamelCase_ )
return cache_path
def url_to_filename(url, etag=None):
    '''simple docstring'''
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def lowerCAmelCase ( UpperCamelCase_: List[Any] , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: Tuple=False , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: str=False , UpperCamelCase_: Dict=None , UpperCamelCase_: int=False , UpperCamelCase_: Dict=False , UpperCamelCase_: Any=False , ) -> List[str]:
'''simple docstring'''
if cache_dir is None:
_a = TRANSFORMERS_CACHE
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
_a = str(UpperCamelCase_ )
if is_remote_url(UpperCamelCase_ ):
# URL, so get it from the cache (downloading if necessary)
_a = get_from_cache(
UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , proxies=UpperCamelCase_ , resume_download=UpperCamelCase_ , user_agent=UpperCamelCase_ , local_files_only=UpperCamelCase_ , )
elif os.path.exists(UpperCamelCase_ ):
# File, and it exists.
_a = url_or_filename
elif urlparse(UpperCamelCase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(UpperCamelCase_ ) )
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(UpperCamelCase_ ) )
if extract_compressed_file:
if not is_zipfile(UpperCamelCase_ ) and not tarfile.is_tarfile(UpperCamelCase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
_a , _a = os.path.split(UpperCamelCase_ )
_a = output_file.replace("." , "-" ) + "-extracted"
_a = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isdir(UpperCamelCase_ ) and os.listdir(UpperCamelCase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
_a = output_path + ".lock"
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_ , ignore_errors=UpperCamelCase_ )
os.makedirs(UpperCamelCase_ )
if is_zipfile(UpperCamelCase_ ):
with ZipFile(UpperCamelCase_ , "r" ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
elif tarfile.is_tarfile(UpperCamelCase_ ):
_a = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_ )
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(UpperCamelCase_ ) )
return output_path_extracted
return output_path
def get_data(query, delim=","):
    '''simple docstring'''
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    '''simple docstring'''
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


def load_frcnn_pkl_from_url(url):
    '''simple docstring'''
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    '''simple docstring'''
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    '''simple docstring'''
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    '''simple docstring'''
    return (images[i : i + batch] for i in range(0, len(images), batch))
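# Illustrative only: how the cache naming scheme above behaves. The filename is
# the sha256 of the URL, with a second sha256 of the etag appended when one is
# known; the URL and etag here are made up.
def _cache_filename_demo() -> str:
    url = "https://cdn.huggingface.co/some-model/pytorch_model.bin"
    return url_to_filename(url, etag='"abc123"')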
| 612 | 1 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main() | 8 |
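# The extended Euclidean algorithm builds on the same recursion and also
# returns the Bezout coefficients: extended_euclid(a, b) == (g, x, y) with
# a*x + b*y == g == gcd(a, b).
def extended_euclid(a: int, b: int) -> tuple:
    if b == 0:
        return a, 1, 0
    g, x, y = extended_euclid(b, a % b)
    return g, y, x - (a // b) * y


assert extended_euclid(6, 3) == (3, 0, 1)  # 6*0 + 3*1 == 3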
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = '''deformable_detr'''
UpperCAmelCase__ : Tuple = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self :Optional[int] ,__snake_case :List[Any]=True ,__snake_case :str=None ,__snake_case :Optional[Any]=3 ,__snake_case :int=3_00 ,__snake_case :Optional[int]=10_24 ,__snake_case :Union[str, Any]=6 ,__snake_case :Optional[int]=10_24 ,__snake_case :List[str]=8 ,__snake_case :Optional[Any]=6 ,__snake_case :int=10_24 ,__snake_case :List[str]=8 ,__snake_case :List[str]=0.0 ,__snake_case :Optional[int]=True ,__snake_case :Any="relu" ,__snake_case :List[str]=2_56 ,__snake_case :List[str]=0.1 ,__snake_case :Dict=0.0 ,__snake_case :Optional[int]=0.0 ,__snake_case :List[Any]=0.02 ,__snake_case :Union[str, Any]=1.0 ,__snake_case :List[str]=True ,__snake_case :Union[str, Any]=False ,__snake_case :List[Any]="sine" ,__snake_case :Tuple="resnet50" ,__snake_case :Dict=True ,__snake_case :Tuple=False ,__snake_case :str=4 ,__snake_case :Union[str, Any]=4 ,__snake_case :List[Any]=4 ,__snake_case :Optional[Any]=False ,__snake_case :str=3_00 ,__snake_case :Tuple=False ,__snake_case :Union[str, Any]=1 ,__snake_case :str=5 ,__snake_case :str=2 ,__snake_case :Dict=1 ,__snake_case :Any=1 ,__snake_case :Union[str, Any]=5 ,__snake_case :Tuple=2 ,__snake_case :Any=0.1 ,__snake_case :str=0.25 ,__snake_case :int=False ,**__snake_case :Optional[int] ,) -> Tuple:
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
a__ = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__snake_case ,__snake_case ):
a__ = backbone_config.get('model_type' )
a__ = CONFIG_MAPPING[backbone_model_type]
a__ = config_class.from_dict(__snake_case )
a__ = use_timm_backbone
a__ = backbone_config
a__ = num_channels
a__ = num_queries
a__ = max_position_embeddings
a__ = d_model
a__ = encoder_ffn_dim
a__ = encoder_layers
a__ = encoder_attention_heads
a__ = decoder_ffn_dim
a__ = decoder_layers
a__ = decoder_attention_heads
a__ = dropout
a__ = attention_dropout
a__ = activation_dropout
a__ = activation_function
a__ = init_std
a__ = init_xavier_std
a__ = encoder_layerdrop
a__ = auxiliary_loss
a__ = position_embedding_type
a__ = backbone
a__ = use_pretrained_backbone
a__ = dilation
# deformable attributes
a__ = num_feature_levels
a__ = encoder_n_points
a__ = decoder_n_points
a__ = two_stage
a__ = two_stage_num_proposals
a__ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.' )
# Hungarian matcher
a__ = class_cost
a__ = bbox_cost
a__ = giou_cost
# Loss coefficients
a__ = mask_loss_coefficient
a__ = dice_loss_coefficient
a__ = bbox_loss_coefficient
a__ = giou_loss_coefficient
a__ = eos_coefficient
a__ = focal_alpha
a__ = disable_custom_kernels
super().__init__(is_encoder_decoder=__snake_case ,**__snake_case )
@property
def lowerCamelCase__( self :Dict ) -> int:
return self.encoder_attention_heads
@property
def lowerCamelCase__( self :int ) -> int:
return self.d_model
def lowerCamelCase__( self :List[str] ) -> str:
a__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a__ = self.backbone_config.to_dict()
a__ = self.__class__.model_type
return output
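# A round-trip sketch using the public class this configuration corresponds to
# (its model_type is "deformable_detr"); the keyword values are arbitrary
# examples, not recommended settings.
def _deformable_detr_config_demo() -> dict:
    from transformers import DeformableDetrConfig

    config = DeformableDetrConfig(num_queries=100, encoder_layers=4, decoder_layers=4)
    return config.to_dict()  # includes the nested backbone config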
| 335 | 0 |
from collections import deque


class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    """simple docstring"""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list,
        queue: deque,
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> list:
        """simple docstring"""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list) -> list:
        """simple docstring"""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list) -> list:
        """simple docstring"""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list) -> list:
        """simple docstring"""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque) -> list:
        """simple docstring"""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """simple docstring"""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: deque) -> deque:
        """simple docstring"""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: deque, time_slice: int) -> tuple:
        """simple docstring"""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> deque:
        """simple docstring"""
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
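    # Post-processing sketch: average KPIs of the run above.
    waits = MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])
    turnarounds = MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])
    print(f"average waiting time:\t\t{sum(waits) / len(waits):.2f}")
    print(f"average turnaround time:\t{sum(turnarounds) / len(turnarounds):.2f}")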
| 720 |
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                # blank out the matched character so it is not matched twice
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
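    # Worked example: "martha" vs "marhta" has six matching characters, one
    # transposition and a three-character common prefix, giving roughly 0.9611.
    print(jaro_winkler("martha", "marhta"))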
| 557 | 0 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
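# Illustrative self-check for set_param (not part of the conversion flow):
# copy weights into an nn.Linear whose shapes already match.
def _set_param_sanity_check() -> None:
    layer = nn.Linear(4, 4)
    weight, bias = torch.randn(4, 4), torch.randn(4)
    set_param(layer, weight, bias)
    assert torch.equal(layer.weight, weight) and torch.equal(layer.bias, bias)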
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
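# Typical invocation (paths are placeholders; the script filename is assumed from
# upstream transformers):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl --config_file config.json --pytorch_dump_path pytorch_model.bin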
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 200 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=7 , lowerCamelCase__=3 , lowerCamelCase__=18 , lowerCamelCase__=30 , lowerCamelCase__=400 , lowerCamelCase__=True , lowerCamelCase__=None , lowerCamelCase__=True , ) -> int:
lowercase__ : int = size if size is not None else {"""height""": 18, """width""": 18}
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : Union[str, Any] = num_channels
lowercase__ : Tuple = image_size
lowercase__ : str = min_resolution
lowercase__ : Optional[Any] = max_resolution
lowercase__ : Union[str, Any] = do_resize
lowercase__ : Dict = size
lowercase__ : Optional[Any] = do_normalize
def UpperCAmelCase__( self ) -> Any:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
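# For context on the "clusters" entry above: ImageGPT tokenizes an image by mapping
# each normalized RGB pixel to the index of its nearest color cluster. A minimal
# sketch of that step (assumed shapes: clusters (n_clusters, 3), pixels (n_pixels, 3)):
#
#   distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
#   token_ids = distances.argmin(axis=1)  # one cluster id per pixel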
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a : List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : str = ImageGPTImageProcessingTester(self )
@property
def UpperCAmelCase__( self ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__( self ) -> int:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , """clusters""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """size""" ) )
self.assertTrue(hasattr(lowerCamelCase__ , """do_normalize""" ) )
def UpperCAmelCase__( self ) -> Any:
lowercase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
lowercase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase__ : Optional[Any] = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , """image_processor.json""" )
image_processor_first.to_json_file(lowerCamelCase__ )
lowercase__ : str = self.image_processing_class.from_json_file(lowerCamelCase__ ).to_dict()
lowercase__ : Optional[int] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
def UpperCAmelCase__( self ) -> List[str]:
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowerCamelCase__ )
lowercase__ : Union[str, Any] = self.image_processing_class.from_pretrained(lowerCamelCase__ ).to_dict()
lowercase__ : Dict = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowerCamelCase__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowerCamelCase__ )
@unittest.skip("""ImageGPT requires clusters at initialization""" )
def UpperCAmelCase__( self ) -> Dict:
pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" )
    image1 = Image.open(dataset[4]["""file"""] )
    image2 = Image.open(dataset[5]["""file"""] )
    images = [image1, image2]
    return images
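# Note: openai/imagegpt-small resizes inputs to 32x32, so the integration test below
# expects 32 * 32 = 1024 cluster-token ids per image.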
@require_vision
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__( self ) -> str:
lowercase__ : Optional[int] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" )
lowercase__ : int = prepare_images()
# test non-batched
lowercase__ : int = image_processing(images[0] , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
lowercase__ : Any = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase__ )
# test batched
lowercase__ : str = image_processing(lowerCamelCase__ , return_tensors="""pt""" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
lowercase__ : Optional[int] = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase__ )
| 200 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple=3 , SCREAMING_SNAKE_CASE__ : List[str]=32 , SCREAMING_SNAKE_CASE__ : str=3 , SCREAMING_SNAKE_CASE__ : int=10 , SCREAMING_SNAKE_CASE__ : Optional[int]=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE__ : int=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]="relu" , SCREAMING_SNAKE_CASE__ : Dict=3 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE__ : List[str]=[2, 3, 4] , SCREAMING_SNAKE_CASE__ : List[str]=1 , ) -> Union[str, Any]:
A : Tuple =parent
A : List[Any] =batch_size
A : Any =image_size
A : int =num_channels
A : str =embeddings_size
A : Any =hidden_sizes
A : Dict =depths
A : Optional[int] =is_training
A : Any =use_labels
A : Optional[Any] =hidden_act
A : Optional[Any] =num_labels
A : Optional[Any] =scope
A : str =len(SCREAMING_SNAKE_CASE__ )
A : Any =out_features
A : List[Any] =out_indices
A : Dict =num_groups
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
A : Tuple =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A : List[Any] =None
if self.use_labels:
A : str =ids_tensor([self.batch_size] , self.num_labels )
A : List[str] =self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ) -> Any:
A : Any =BitModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A : int =model(SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
A : List[Any] =self.num_labels
A : List[str] =BitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A : Any =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
A : Any =BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A : Any =model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A : Any =None
A : int =BitBackbone(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A : Dict =model(SCREAMING_SNAKE_CASE__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Any:
A : Optional[Any] =self.prepare_config_and_inputs()
A , A , A : Tuple =config_and_inputs
A : Optional[Any] ={'pixel_values': pixel_values}
return config, inputs_dict
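# A note on the backbone checks above (inferred from the assertions, not verified
# against upstream docs): with out_features=None a BitBackbone falls back to exposing
# only the last stage, hence the single (batch, hidden_sizes[-1], 1, 1) feature map
# and a one-element channels list.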
@require_torch
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase : Union[str, Any] = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
lowercase : str = False
lowercase : List[Any] = False
lowercase : List[str] = False
lowercase : str = False
lowercase : str = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Tuple:
A : List[str] =BitModelTester(self )
A : Tuple =ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Dict:
return
@unittest.skip(reason='Bit does not output attentions' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE_ ( self : str ) -> int:
A , A : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : int =model_class(SCREAMING_SNAKE_CASE__ )
A : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A : str =[*signature.parameters.keys()]
A : List[Any] =['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[int]:
A : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[Any]:
A , A : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A : str =model_class(config=SCREAMING_SNAKE_CASE__ )
for name, module in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Optional[Any]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ):
A : Union[str, Any] =model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
A : Tuple =model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
A : List[str] =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A : Dict =self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A , A : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
A : Union[str, Any] =['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
A : str =layer_type
A : Union[str, Any] =True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A : Union[str, Any] =True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> Dict:
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Dict:
A : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> int:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A : str =BitModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Optional[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Dict:
A : int =BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =self.default_image_processor
A : str =prepare_img()
A : List[Any] =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE__ )
# forward pass
with torch.no_grad():
A : Tuple =model(**SCREAMING_SNAKE_CASE__ )
# verify the logits
A : List[str] =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ )
A : List[str] =torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : int = (BitBackbone,) if is_torch_available() else ()
lowercase : Dict = BitConfig
lowercase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
A : Any =BitModelTester(self )
| 661 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Optional[int] =get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[str] = XLMRobertaTokenizer
lowercase : Dict = XLMRobertaTokenizerFast
lowercase : str = True
lowercase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A : List[str] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> List[Any]:
A : List[str] ='<pad>'
A : int =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
A : List[str] =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_02 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> str:
A : Union[str, Any] =XLMRobertaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer.tokenize('This is a test' )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A : Any =tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A : Tuple =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
A : Union[str, Any] =tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
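    # A sketch of the id arithmetic assumed above: XLM-R reserves fairseq control ids
    # (<s>=0, <pad>=1, </s>=2, <unk>=3), so every SentencePiece id is shifted by
    # tokenizer.fairseq_offset (= 1); the fixture model's unk piece (spm id 2) therefore
    # lands on id 3, as the inline "unk: 2 + 1 = 3" comment notes.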
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Optional[int]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
A : Any =(self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A : List[Any] =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : Dict =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A : str =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A : List[str] =tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Dict =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=True
A : Optional[int] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it save with the same files
self.assertSequenceEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Checks everything loads correctly in the same way
A : Tuple =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : Tuple =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
# Save tokenizer rust, legacy_format=False
A : List[Any] =tempfile.mkdtemp()
A : Optional[int] =tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE__ , legacy_format=SCREAMING_SNAKE_CASE__ )
A : str =tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A : List[Any] =tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE__ )
A : List[Any] =tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
shutil.rmtree(SCREAMING_SNAKE_CASE__ )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Any:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , f.name )
A : Optional[Any] =XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE__ )
A : int =pickle.dumps(SCREAMING_SNAKE_CASE__ )
pickle.loads(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
if not self.test_rust_tokenizer:
return
A : Union[str, Any] =self.get_tokenizer()
A : int =self.get_rust_tokenizer()
A : List[str] ='I was born in 92000, and this is falsé.'
A : Union[str, Any] =tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
A : Optional[int] =rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Any =tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
A : Tuple =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A : Optional[Any] =self.get_rust_tokenizer()
A : int =tokenizer.encode(SCREAMING_SNAKE_CASE__ )
A : Dict =rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ )
self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ) -> List[str]:
A : Any ='Hello World!'
A : Optional[Any] =[0, 3_53_78, 66_61, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> str:
A : Any =(
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
A : int =[
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any:
# fmt: off
A : List[Any] ={'input_ids': [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
| 661 | 1 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """
    Project Euler 71: return the numerator of the largest fraction that is strictly
    less than numerator/denominator and whose own denominator does not exceed limit.
    For 3/7 with limit 1,000,000 this is 428570 (from 428570/999997).
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
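# Worked example with a small limit: for 3/7 and limit = 8 the closest candidates are
# 1/3, 3/8 and 2/5, and 2/5 = 0.4 is the largest one below 3/7 ~= 0.4286, so
# solution(3, 7, 8) returns 2.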
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_000_000))
| 94 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
SCREAMING_SNAKE_CASE = '▁'
# Segments (not really needed)
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = 4
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = '''left'''
UpperCamelCase_ = XLNetTokenizer
def __init__( self : int , UpperCAmelCase : Dict=None , UpperCAmelCase : str=None , UpperCAmelCase : str=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Optional[Any]="<sep>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Optional[Any]="<cls>" , UpperCAmelCase : Dict="<mask>" , UpperCAmelCase : int=["<eop>", "<eod>"] , **UpperCAmelCase : List[Any] , ) -> List[str]:
'''simple docstring'''
lowercase : Dict =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
vocab_file=UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , remove_space=UpperCAmelCase , keep_accents=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Tuple =3
lowercase : Union[str, Any] =do_lower_case
lowercase : Any =remove_space
lowercase : int =keep_accents
lowercase : int =vocab_file
lowercase : Union[str, Any] =False if not self.vocab_file else True
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Union[str, Any] =[self.sep_token_id]
lowercase : Optional[Any] =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
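    # Unlike BERT's leading [CLS], XLNet appends its special tokens: a single sequence
    # becomes `tokens </s> <cls>` and a pair becomes `tokens_a </s> tokens_b </s> <cls>`,
    # which is what this method and the token-type method below encode.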
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[int] =[self.sep_token_id]
lowercase : Union[str, Any] =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Dict =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
copyfile(self.vocab_file , UpperCAmelCase )
return (out_vocab_file,)
| 94 | 1 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
_snake_case : List[str] = NewType('DataClass', Any)
_snake_case : List[str] = NewType('DataClassType', Any)
def string_to_bool(v):
    """Parse common truthy/falsy strings for argparse (booleans pass through unchanged)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' )
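# Usage sketch:
#   string_to_bool("yes") -> True, string_to_bool("0") -> False, string_to_bool(True) -> True
#   string_to_bool("maybe") raises ArgumentTypeError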
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Build a converter that maps an argparse string back onto the matching choice object."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """dataclasses.field wrapper that records argparse aliases/help in the field metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =42
def __init__( self, _a, **_a ) -> Optional[Any]:
# To make the default appear when using --help
if "formatter_class" not in kwargs:
__SCREAMING_SNAKE_CASE = ArgumentDefaultsHelpFormatter
super().__init__(**_a )
if dataclasses.is_dataclass(_a ):
__SCREAMING_SNAKE_CASE = [dataclass_types]
__SCREAMING_SNAKE_CASE = list(_a )
for dtype in self.dataclass_types:
self._add_dataclass_arguments(_a )
@staticmethod
def __lowerCAmelCase ( _a, _a ) -> str:
__SCREAMING_SNAKE_CASE = f'''--{field.name}'''
__SCREAMING_SNAKE_CASE = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, _a ):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default" )
__SCREAMING_SNAKE_CASE = kwargs.pop("aliases", [] )
if isinstance(_a, _a ):
__SCREAMING_SNAKE_CASE = [aliases]
__SCREAMING_SNAKE_CASE = getattr(field.type, "__origin__", field.type )
if origin_type is Union or (hasattr(_a, "UnionType" ) and isinstance(_a, types.UnionType )):
if str not in field.type.__args__ and (
len(field.type.__args__ ) != 2 or type(_a ) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f''' Problem encountered in field \'{field.name}\'.''' )
if type(_a ) not in field.type.__args__:
# filter `str` in Union
__SCREAMING_SNAKE_CASE = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
__SCREAMING_SNAKE_CASE = getattr(field.type, "__origin__", field.type )
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
__SCREAMING_SNAKE_CASE = (
field.type.__args__[0] if isinstance(_a, field.type.__args__[1] ) else field.type.__args__[1]
)
__SCREAMING_SNAKE_CASE = getattr(field.type, "__origin__", field.type )
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
__SCREAMING_SNAKE_CASE = {}
if origin_type is Literal or (isinstance(field.type, _a ) and issubclass(field.type, _a )):
if origin_type is Literal:
__SCREAMING_SNAKE_CASE = field.type.__args__
else:
__SCREAMING_SNAKE_CASE = [x.value for x in field.type]
__SCREAMING_SNAKE_CASE = make_choice_type_function(kwargs["choices"] )
if field.default is not dataclasses.MISSING:
__SCREAMING_SNAKE_CASE = field.default
else:
__SCREAMING_SNAKE_CASE = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
__SCREAMING_SNAKE_CASE = copy(_a )
# Hack because type=bool in argparse does not behave as we want.
__SCREAMING_SNAKE_CASE = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is False if we have no default when of type bool.
__SCREAMING_SNAKE_CASE = False if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
__SCREAMING_SNAKE_CASE = default
# This tells argparse we accept 0 or 1 value after --field_name
__SCREAMING_SNAKE_CASE = "?"
# This is the value that will get picked if we do --field_name (without value)
__SCREAMING_SNAKE_CASE = True
elif isclass(_a ) and issubclass(_a, _a ):
__SCREAMING_SNAKE_CASE = field.type.__args__[0]
__SCREAMING_SNAKE_CASE = "+"
if field.default_factory is not dataclasses.MISSING:
__SCREAMING_SNAKE_CASE = field.default_factory()
elif field.default is dataclasses.MISSING:
__SCREAMING_SNAKE_CASE = True
else:
__SCREAMING_SNAKE_CASE = field.type
if field.default is not dataclasses.MISSING:
__SCREAMING_SNAKE_CASE = field.default
elif field.default_factory is not dataclasses.MISSING:
__SCREAMING_SNAKE_CASE = field.default_factory()
else:
__SCREAMING_SNAKE_CASE = True
parser.add_argument(_a, *_a, **_a )
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (field.type is bool or field.type == Optional[bool]):
__SCREAMING_SNAKE_CASE = False
parser.add_argument(f'''--no_{field.name}''', action="store_false", dest=field.name, **_a )
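    # Net effect for an optional bool field `do_train` defaulting to True (a sketch):
    # passing no flag or `--do_train` keeps True, while `--do_train false` and
    # `--no_do_train` both yield False; the `no_*` complement shares the same
    # destination and is registered last, so the last flag on the command line wins.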
def __lowerCAmelCase ( self, _a ) -> Optional[Any]:
if hasattr(_a, "_argument_group_name" ):
__SCREAMING_SNAKE_CASE = self.add_argument_group(dtype._argument_group_name )
else:
__SCREAMING_SNAKE_CASE = self
try:
__SCREAMING_SNAKE_CASE = get_type_hints(_a )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(_a ):
__SCREAMING_SNAKE_CASE = ".".join(map(_a, sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(_a ):
if not field.init:
continue
__SCREAMING_SNAKE_CASE = type_hints[field.name]
self._parse_dataclass_field(_a, _a )
def __lowerCAmelCase ( self, _a=None, _a=False, _a=True, _a=None, _a=None, ) -> Tuple[DataClass, ...]:
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
__SCREAMING_SNAKE_CASE = []
if args_filename:
args_files.append(Path(_a ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
__SCREAMING_SNAKE_CASE = ArgumentParser()
args_file_parser.add_argument(_a, type=_a, action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args_file_parser.parse_known_args(args=_a )
__SCREAMING_SNAKE_CASE = vars(_a ).get(args_file_flag.lstrip("-" ), _a )
if cmd_args_file_paths:
args_files.extend([Path(_a ) for p in cmd_args_file_paths] )
__SCREAMING_SNAKE_CASE = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
__SCREAMING_SNAKE_CASE = file_args + args if args is not None else file_args + sys.argv[1:]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.parse_known_args(args=_a )
__SCREAMING_SNAKE_CASE = []
for dtype in self.dataclass_types:
__SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(_a ) if f.init}
__SCREAMING_SNAKE_CASE = {k: v for k, v in vars(_a ).items() if k in keys}
for k in keys:
delattr(_a, _a )
__SCREAMING_SNAKE_CASE = dtype(**_a )
outputs.append(_a )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(_a )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def __lowerCAmelCase ( self, _a, _a = False ) -> Tuple[DataClass, ...]:
__SCREAMING_SNAKE_CASE = set(args.keys() )
__SCREAMING_SNAKE_CASE = []
for dtype in self.dataclass_types:
__SCREAMING_SNAKE_CASE = {f.name for f in dataclasses.fields(_a ) if f.init}
__SCREAMING_SNAKE_CASE = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
__SCREAMING_SNAKE_CASE = dtype(**_a )
outputs.append(_a )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(_a )}''' )
return tuple(_a )
def __lowerCAmelCase ( self, _a, _a = False ) -> Tuple[DataClass, ...]:
with open(Path(_a ), encoding="utf-8" ) as open_json_file:
__SCREAMING_SNAKE_CASE = json.loads(open_json_file.read() )
__SCREAMING_SNAKE_CASE = self.parse_dict(_a, allow_extra_keys=_a )
return tuple(_a )
def __lowerCAmelCase ( self, _a, _a = False ) -> Tuple[DataClass, ...]:
__SCREAMING_SNAKE_CASE = self.parse_dict(yaml.safe_load(Path(_a ).read_text() ), allow_extra_keys=_a )
return tuple(_a )
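# Minimal usage sketch (method names are taken from the upstream transformers API;
# in this copy every method is renamed to `__lowerCAmelCase` and the class to
# `__SCREAMING_SNAKE_CASE`, so this is illustrative only):
#
#   @dataclasses.dataclass
#   class Args:
#       lr: float = 3e-4
#       do_train: bool = False
#
#   parser = HfArgumentParser(Args)  # upstream class name assumed
#   (args,) = parser.parse_args_into_dataclasses(["--lr", "1e-3", "--do_train"])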
| 711 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
SCREAMING_SNAKE_CASE__ =(DDIMParallelScheduler,)
SCREAMING_SNAKE_CASE__ =(("""eta""", 0.0), ("""num_inference_steps""", 50))
def __lowerCAmelCase ( self, **_a ) -> Dict:
__SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 10_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**_a )
return config
def __lowerCAmelCase ( self, **_a ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(**_a )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 10, 0.0
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
scheduler.set_timesteps(_a )
for t in scheduler.timesteps:
__SCREAMING_SNAKE_CASE = model(_a, _a )
__SCREAMING_SNAKE_CASE = scheduler.step(_a, _a, _a, _a ).prev_sample
return sample
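    # full_loop above is the standard reverse-diffusion pattern: at each timestep the
    # model predicts the noise residual and scheduler.step() returns the previous,
    # less-noisy sample; eta = 0.0 makes DDIM sampling fully deterministic.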
def __lowerCAmelCase ( self ) -> List[Any]:
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=_a )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_a )
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(steps_offset=1 )
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps, torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def __lowerCAmelCase ( self ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_a, beta_end=_a )
def __lowerCAmelCase ( self ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __lowerCAmelCase ( self ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __lowerCAmelCase ( self ) -> List[Any]:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_a )
def __lowerCAmelCase ( self ) -> Optional[Any]:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_a )
def __lowerCAmelCase ( self ) -> List[str]:
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_a, prediction_type=_a, sample_max_value=_a, )
def __lowerCAmelCase ( self ) -> Dict:
for t in [1, 10, 49]:
self.check_over_forward(time_step=_a )
def __lowerCAmelCase ( self ) -> Dict:
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 5_00] ):
self.check_over_forward(time_step=_a, num_inference_steps=_a )
def __lowerCAmelCase ( self ) -> Any:
for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_a, eta=_a )
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20, 4_00 ) - 0.1_4771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80, 9_60 ) - 0.3_2460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0, 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87, 4_86 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99, 9_98 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**_a )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 10, 0.0
scheduler.set_timesteps(_a )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1
__SCREAMING_SNAKE_CASE = samplea.shape[0]
__SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea], dim=0 )
__SCREAMING_SNAKE_CASE = torch.arange(_a )[0:3, None].repeat(1, _a )
__SCREAMING_SNAKE_CASE = model(samples.flatten(0, 1 ), timesteps.flatten(0, 1 ) )
__SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(_a, timesteps.flatten(0, 1 ), samples.flatten(0, 1 ), _a )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
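    # The parallel feature under test: batch_step_no_noise denoises three perturbed
    # copies of the sample at three different timesteps in a single call by flattening
    # the (3, batch, ...) stack to (3 * batch, ...), which is what distinguishes
    # DDIMParallelScheduler from the plain DDIM scheduler.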
def __lowerCAmelCase ( self ) -> int:
__SCREAMING_SNAKE_CASE = self.full_loop()
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.22_3967 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.full_loop(prediction_type="v_prediction" )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def __lowerCAmelCase ( self ) -> List[Any]:
# We specify different beta, so that the first alpha is 0.99
__SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=_a, beta_start=0.01 )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
# We specify different beta, so that the first alpha is 0.99
__SCREAMING_SNAKE_CASE = self.full_loop(set_alpha_to_one=_a, beta_start=0.01 )
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 214 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__a : Optional[Any] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Optional[int] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Union[str, Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
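# With the _LazyModule in place, framework-specific submodules are only imported on
# first attribute access, which keeps `import transformers` cheap. Illustrative use
# (the import path is the standard transformers layout, assumed here):
#
#   from transformers.models.distilbert import DistilBertConfig  # triggers the lazy import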
| 606 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
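        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = ceil(90.4) = 91.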
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make the random mask reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make the random mask reproducible
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 708 |
"""
Dutch national flag sort: an in-place, single-pass, three-way partition of a
sequence that contains only the values 0, 1 and 2 (red, white, blue).
"""

red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """
    Sort a sequence of 0s, 1s and 2s in place using Dijkstra's three-way
    partitioning and return it.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
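# Example: dutch_national_flag_sort([2, 0, 1, 0, 2, 1]) returns [0, 0, 1, 1, 2, 2].
# Each element is swapped at most once, so the whole sort runs in O(n) time and
# O(1) extra space.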
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ = input('Enter numbers separated by commas:\n').strip()
lowerCAmelCase_ = [int(item.strip()) for item in user_input.split(',')]
print(f"""{dutch_national_flag_sort(unsorted)}""")
| 435 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
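# When this entry point is wired up as the `accelerate` console script, a command
# such as `accelerate launch train.py` parses the `launch` subcommand and dispatches
# to whatever handler launch_command_parser registered as `func` on the subparser
# (presumably via `set_defaults(func=...)`; that detail lives in the command modules
# imported above, not in this file).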
| 117 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
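    # Design note: gradient checkpointing trades compute for memory by re-running each
    # checkpointed block's forward pass during backward instead of storing activations,
    # which is why the test above only needs to verify that loss.backward() succeeds.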
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 117 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
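# The three OfflineSimulationMode values exercised below simulate distinct failure
# modes: CONNECTION_TIMES_OUT makes sockets hang until a timeout fires,
# CONNECTION_FAILS makes requests raise ConnectionError immediately, and
# HF_DATASETS_OFFLINE_SET_TO_1 enables datasets' own offline mode.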
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # datasets raises its own OfflineModeIsEnabled error here; it subclasses
        # ConnectionError, which we catch to avoid assuming the exact import path.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 703 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
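# Note on the `e` / `d` arguments (inferred from the assertions above, not from the
# make_student module itself): `e` and `d` are the number of encoder/decoder layers
# to copy into the student; passing None for one of them appears to keep that side
# at the teacher's full depth.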
| 569 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Find the day of the week for nearly any Gregorian date using Zeller's
    congruence. Expects a string in mm-dd-yyyy or mm/dd/yyyy format.
    """
    # Days of the week for the response, keyed by the Zeller result f.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    # Maps datetime.weekday() (Monday=0) onto the Zeller convention (Sunday=0).
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
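# Worked example for zeller("01-31-2010"): m=1 <= 2, so y=2009 and m=13; then
# c=20, k=9, t=int(2.6*13 - 5.39)=28, u=5, v=2, x=31+9=40, z=75, w=75-40=35,
# and f=35%7=0 -> "Sunday", which matches datetime.date(2010, 1, 31).weekday()==6
# under the convert_datetime_days mapping above.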
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 611 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]

        self.assertListEqual(vocab_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
        end = tokenizer.vocab_size
        vocab_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            vocab_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )
        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)
    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(PYTHON_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])
    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 249 | 0 |
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
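        # Worked example with the defaults above: self.last_hidden_size becomes
        # int(1024 * 0.25) = 256, the channel width a 0.25-depth-multiplier network
        # actually produces at its final stage.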
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileNetV1 does not use inputs_embeds" )
def lowerCAmelCase__ ( self: Optional[int] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not support input and output embeddings" )
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileNetV1 does not output attentions" )
def lowerCAmelCase__ ( self: Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =model_class(_lowerCAmelCase )
UpperCAmelCase_ =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ =[*signature.parameters.keys()]
UpperCAmelCase_ =["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
def check_hidden_states_output(_lowerCAmelCase: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Tuple ):
UpperCAmelCase_ =model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ =model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) )
UpperCAmelCase_ =outputs.hidden_states
UpperCAmelCase_ =26
self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ =True
check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ =MobileNetVaModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def a__ ( ):
'''simple docstring'''
UpperCAmelCase_ =Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[Any]:
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224" ) if is_vision_available() else None
)
@slow
def lowerCAmelCase__ ( self: Dict ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224" ).to(_lowerCAmelCase )
UpperCAmelCase_ =self.default_image_processor
UpperCAmelCase_ =prepare_img()
UpperCAmelCase_ =image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ =model(**_lowerCAmelCase )
# verify the logits
UpperCAmelCase_ =torch.Size((1, 1001) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
UpperCAmelCase_ =torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) )
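# A minimal end-to-end usage sketch mirroring the integration test above
# (illustrative only; the checkpoint name is the one the test already uses):
#
#   from transformers import pipeline
#   classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
#   classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")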
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
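# Note on inputs (sketch): LiLT expects one bounding box per token in
# (x0, y0, x1, y1) format, normalized to the 0-1000 range used by the tester
# above, e.g.
#
#   input_ids = torch.tensor([[1, 2]])
#   bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])
#   outputs = model(input_ids=input_ids, bbox=bbox)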
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
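# Head-masking sketch: `test_generate_with_head_masking` above relies on the
# fact that a zero mask of shape (num_layers, num_heads) silences every head,
# so the summed attention weights must come out exactly 0.0, e.g.
#
#   head_mask = torch.zeros(config.num_layers, config.num_heads)
#   out = model.generate(input_ids, head_mask=head_mask,
#                        output_attentions=True, return_dict_in_generate=True)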
from __future__ import annotations

from random import choice


def random_pivot(lst):
    # choose a random pivot element from the list
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    # return the kth smallest element of `lst` (1-indexed) via quickselect;
    # assumes distinct elements, since values equal to the pivot are dropped
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
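# A minimal usage sketch (distinct elements, as assumed above):
#
#   >>> kth_number([2, 1, 3, 4, 5], 3)
#   3
#   >>> kth_number([10, 2, 1, 20, 4, 3], 1)
#   1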
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=False)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
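# Usage sketch (assumes the default checkpoint paths above exist locally):
#
#   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#   vqgan = load_vqgan(device)
#   x = torch.randn(1, 3, 256, 256, device=device)  # an image batch in [-1, 1]
#   x_rec = reconstruct_with_vqgan(x, vqgan)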
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
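# A minimal usage sketch (illustrative; run from an environment where
# `transformers` is installed rather than inside this module):
#
#   from transformers import NllbMoeConfig
#   config = NllbMoeConfig(num_experts=8, expert_capacity=16)
#   assert config.hidden_size == config.d_model  # resolved via `attribute_map`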
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
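# Sketch of what the `_LazyModule` pattern above buys (illustrative, not part
# of this module): submodules listed in `_import_structure` are only imported
# on first attribute access, so e.g.
#
#   import transformers.models.mega as mega  # cheap: nothing heavy imported yet
#   mega.MegaModel                           # triggers the real import of modeling_mega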
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC


torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    # compare a library version (or an already-parsed `Version`) against a
    # requirement using the comparison named by `operation`
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    # compare the currently installed version of torch against `version`
    return compare_versions(torch_version, operation, version)
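# Usage sketch (assuming an accelerate-style STR_OPERATION_TO_FUNC mapping,
# i.e. {">": operator.gt, ">=": operator.ge, ...}):
#
#   if is_torch_version(">=", "2.0.0"):
#       ...  # use torch 2.x-only features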
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
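# Usage sketch: pure-comment lines are stripped before hashing, so sources
# that differ only in comment lines hash identically.
#
#   >>> _hash_python_lines(["# header", "x = 1"]) == _hash_python_lines(["x = 1"])
#   True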
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULES_SUPPORTING_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : List[Any] = logging.get_logger(__name__)
A_ : Dict = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCamelCase (A__ ):
lowerCamelCase__ : int = 'unispeech'
def __init__( self : Union[str, Any] , __UpperCAmelCase : List[Any]=3_2 , __UpperCAmelCase : Union[str, Any]=7_6_8 , __UpperCAmelCase : Tuple=1_2 , __UpperCAmelCase : Dict=1_2 , __UpperCAmelCase : Optional[Any]=3_0_7_2 , __UpperCAmelCase : Optional[Any]="gelu" , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : List[str]=0.0 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : List[Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : Union[str, Any]=0.02 , __UpperCAmelCase : Union[str, Any]=1e-5 , __UpperCAmelCase : List[Any]="group" , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , __UpperCAmelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , __UpperCAmelCase : int=(1_0, 3, 3, 3, 3, 2, 2) , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=1_2_8 , __UpperCAmelCase : str=1_6 , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Union[str, Any]=0.05 , __UpperCAmelCase : str=1_0 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : Tuple=1_0 , __UpperCAmelCase : Tuple=0 , __UpperCAmelCase : Tuple=3_2_0 , __UpperCAmelCase : Any=2 , __UpperCAmelCase : Optional[int]=0.1 , __UpperCAmelCase : Any=1_0_0 , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=2_5_6 , __UpperCAmelCase : Dict=0.1 , __UpperCAmelCase : List[str]="mean" , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : str=2_5_6 , __UpperCAmelCase : Dict=8_0 , __UpperCAmelCase : List[Any]=0 , __UpperCAmelCase : int=1 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=0.5 , **__UpperCAmelCase : List[str] , ) -> Tuple:
super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = feat_extract_norm
SCREAMING_SNAKE_CASE__ = feat_extract_activation
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = list(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = conv_bias
SCREAMING_SNAKE_CASE__ = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE__ = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE__ = len(self.conv_dim )
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout
SCREAMING_SNAKE_CASE__ = attention_dropout
SCREAMING_SNAKE_CASE__ = activation_dropout
SCREAMING_SNAKE_CASE__ = feat_proj_dropout
SCREAMING_SNAKE_CASE__ = final_dropout
SCREAMING_SNAKE_CASE__ = layerdrop
SCREAMING_SNAKE_CASE__ = layer_norm_eps
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_ctc_classes
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = do_stable_layer_norm
SCREAMING_SNAKE_CASE__ = use_weighted_layer_sum
SCREAMING_SNAKE_CASE__ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE__ = apply_spec_augment
SCREAMING_SNAKE_CASE__ = mask_time_prob
SCREAMING_SNAKE_CASE__ = mask_time_length
SCREAMING_SNAKE_CASE__ = mask_time_min_masks
SCREAMING_SNAKE_CASE__ = mask_feature_prob
SCREAMING_SNAKE_CASE__ = mask_feature_length
SCREAMING_SNAKE_CASE__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE__ = num_codevectors_per_group
SCREAMING_SNAKE_CASE__ = num_codevector_groups
SCREAMING_SNAKE_CASE__ = contrastive_logits_temperature
SCREAMING_SNAKE_CASE__ = feat_quantizer_dropout
SCREAMING_SNAKE_CASE__ = num_negatives
SCREAMING_SNAKE_CASE__ = codevector_dim
SCREAMING_SNAKE_CASE__ = proj_codevector_dim
SCREAMING_SNAKE_CASE__ = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE__ = ctc_loss_reduction
SCREAMING_SNAKE_CASE__ = ctc_zero_infinity
# pretraining loss
SCREAMING_SNAKE_CASE__ = replace_prob
@property
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
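# Usage sketch (illustrative): downsampling factor of the default feature encoder.
#
#   from transformers import UniSpeechConfig
#   config = UniSpeechConfig()
#   config.inputs_to_logits_ratio  # 5 * 2**6 == 320 input samples per logit frame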
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel

from ...models.attention import BasicTransformerBlock
from ...utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size, num_heads, hid_size, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
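# Shape sketch (illustrative): for a CLIP vision config with hidden_size=1024
# and the default proj_size=768, the encoder maps a pixel batch to a single
# conditioning token per image:
#
#   (batch, 3, 224, 224) --CLIPVisionModel--> pooler_output (batch, 1024)
#   --mapper / LayerNorm / proj_out--> (batch, 1, 768)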
def bfs(graph, s, t, parent):
    # Return True if there is a path from source `s` to sink `t` with free capacity.
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to recover the augmenting path
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
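# For this classic six-node network (CLRS, Fig. 26.1) the expected output is a
# maximum flow of 23. Note that `ford_fulkerson` mutates `graph` in place,
# turning it into the residual network.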
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
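# Note on the slicing above (sketch): the original checkpoints use a hidden
# size of 256, and nn.MultiheadAttention stores the query/key/value projections
# as one stacked (3*256, 256) matrix, so rows [:256], [256:512] and [-256:] are
# the q, k and v projections respectively.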
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
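# Example invocation (illustrative; the script filename is assumed -- the default
# --checkpoint_url below converts the detection checkpoint):
#   python convert_table_transformer_checkpoint.py \
#       --pytorch_dump_folder_path ./table-transformer-detection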
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
type=str,
choices=[
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
"https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
],
help="URL of the Table Transformer checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 721 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and evaluation."""

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms defined above."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
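# Example invocation (illustrative; the flags follow the dataclasses defined above,
# and the output directory name is arbitrary):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-3 --mask_ratio 0.75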
| 616 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 69 |
'''simple docstring'''
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
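# Minimal subclass sketch (hypothetical command, shown only to illustrate the
# contract defined above; `EchoCommand` is not part of the library):
#
#   class EchoCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           echo_parser = parser.add_parser("echo")
#           echo_parser.set_defaults(func=lambda args: EchoCommand())
#
#       def run(self):
#           print("echo")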
| 368 | 0 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron with the given edge length."""
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError("Length must be a positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
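# Quick sanity check for the closed-form expressions above (values rounded; with
# edge = 1 the surface area is 3 * sqrt(25 + 10 * sqrt(5)) and the volume is
# (15 + 7 * sqrt(5)) / 4):
#   dodecahedron_surface_area(1)  # ~20.6457
#   dodecahedron_volume(1)        # ~7.6631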
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704 |
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute the gamma function of `num` by numerically integrating its defining integral."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
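# Sanity check (Gamma(n) == (n - 1)! for positive integers, so the call below
# should return a value very close to 24):
#   gamma(5.0)  # ~24.0, since 4! == 24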
if __name__ == "__main__":
from doctest import testmod
testmod()
| 456 | 0 |
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative version of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
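# Example (illustrative): heaps([1, 2, 3]) returns all 3! = 6 permutations, in the
# order produced by Heap's algorithm:
#   (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)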
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
| 493 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
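# Usage sketch (illustrative): a cosine schedule with 1000 diffusion steps yields a
# length-1000 tensor of betas, each capped at max_beta:
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,) and betas.max() <= 0.999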
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
    def __len__(self):
        return self.config.num_train_timesteps
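# Usage sketch for DDIM inversion (illustrative; `unet` and `latents` stand in for
# a real model and its inputs):
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample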
| 493 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        """Test that stable diffusion img2img works with fp16."""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 709 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
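# Usage sketch (illustrative): the default constructor reproduces the base
# DeiT architecture; individual fields can be overridden:
#   config = DeiTConfig(image_size=384)
#   assert config.num_hidden_layers == 12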
| 254 | 0 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 411 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 411 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of lists of integers; it may only be initialized with integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # One constraint may not be a complete subset of another, since fulfillment would be ambiguous.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 708 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
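# Usage sketch (illustrative): a composite config can be built from its two
# sub-configs, mirroring `from_text_vision_configs` above:
#   text_config = Pix2StructTextConfig()
#   vision_config = Pix2StructVisionConfig()
#   config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "pix2struct"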
| 367 | 0 |
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
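# Example (illustrative): the sort runs in O(n^2) comparisons and mutates its input:
#   exchange_sort([5, 4, 3, 2, 1])  # -> [1, 2, 3, 4, 5]
#   exchange_sort([-1, 0, 7])       # -> [-1, 0, 7]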
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
| 327 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''junnyu/roformer_chinese_small''': 1536,
'''junnyu/roformer_chinese_base''': 1536,
'''junnyu/roformer_chinese_char_small''': 512,
'''junnyu/roformer_chinese_char_base''': 512,
'''junnyu/roformer_small_discriminator''': 128,
'''junnyu/roformer_small_generator''': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
'''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
'''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
'''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the backend normalizer in sync with the requested casing/accent options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        # The custom Jieba pre-tokenizer is not picklable; swap in a plain BERT one.
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Restore the Jieba pre-tokenizer after unpickling.
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # The Jieba pre-tokenizer cannot be serialized to tokenizer.json; save with the BERT one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
 | 386 |
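# Minimal usage sketch for the tokenizer above (assumes network access to the
# junnyu checkpoints listed in PRETRAINED_VOCAB_FILES_MAP):
#
#   tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   ids = tok("今天天气非常好。")["input_ids"]   # Jieba pre-tokenization runs inside
#   print(tok.decode(ids))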
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Return a copy whose label schema carries the dataset's actual class names.
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
 | 386 | 1 |
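# Hypothetical alignment with a real dataset's features (dataset name is a placeholder):
#
#   from datasets import load_dataset
#   features = load_dataset("beans", split="train").features
#   template = ImageClassification(image_column="image", label_column="labels")
#   aligned = template.align_with_features(features)  # label_schema now carries the class names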
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_blenderbot_small""": [
"""BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotSmallConfig""",
"""BlenderbotSmallOnnxConfig""",
],
"""tokenization_blenderbot_small""": ["""BlenderbotSmallTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["""BlenderbotSmallTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"""BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotSmallForCausalLM""",
"""BlenderbotSmallForConditionalGeneration""",
"""BlenderbotSmallModel""",
"""BlenderbotSmallPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"""TFBlenderbotSmallForConditionalGeneration""",
"""TFBlenderbotSmallModel""",
"""TFBlenderbotSmallPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"""FlaxBlenderbotSmallForConditionalGeneration""",
"""FlaxBlenderbotSmallModel""",
"""FlaxBlenderbotSmallPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias kept for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
 | 700 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    r"""Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        # Pad on the bottom/right so both dimensions become multiples of `size`.
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
 | 81 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Number of times the digits must be multiplied together before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Number of times the digits must be summed before one digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
    import doctest

    doctest.testmod()
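    # Worked examples consistent with the definitions above (illustrative checks):
    #   multiplicative_persistence(39) == 3   # 3*9=27 -> 2*7=14 -> 1*4=4
    #   additive_persistence(199)      == 3   # 1+9+9=19 -> 1+9=10 -> 1+0=1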
| 136 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] using `steps` chords."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 10_0000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
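
    # Sanity check (illustrative addition, not in the original script): chords on a
    # straight line sum to the exact length, so f(x) = 2x on [0, 2] gives 2*sqrt(5)
    # for any step count.
    assert abs(line_length(lambda x: 2 * x, 0, 2, 3) - 2 * math.sqrt(5)) < 1e-9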
| 136 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'dandelin/vilt-b32-finetuned-vqa'
    description = (
        'This is a tool that answers a question about an image. It takes an input named `image` which should be the '
        'image containing the information, as well as a `question` which should be the question in English. It '
        'returns a text that is the answer to the question.'
    )
    name = 'image_qa'
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ['image', 'text']
    outputs = ['text']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors='pt')

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
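

# Hypothetical invocation of the tool (requires torch, the vision extras and a
# PIL image in scope; `pil_image` is a placeholder name):
#
#   tool = ImageQuestionAnsweringTool()
#   answer = tool(image=pil_image, question="How many cats are there?")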
 | 718 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Bundles a FLAVA image processor and a BERT tokenizer into a single processor."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'FlavaImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=False, max_length=None, stride=0, pad_to_multiple_of=None, return_image_mask=None, return_codebook_pixels=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images, return_image_mask=return_image_mask, return_codebook_pixels=return_codebook_pixels, return_tensors=return_tensors, **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
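

# Minimal usage sketch (assumes the facebook/flava-full checkpoint is reachable;
# `pil_image` is a placeholder):
#
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   batch = processor(images=pil_image, text="a photo of a cat", return_tensors="pt")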
| 389 | 0 |
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, decoder_attention_heads=4, max_position_embeddings=30, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings)

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs['past_key_values']

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)['last_hidden_state']
        output_from_past = model(next_tokens, past_key_values=past_key_values)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 76 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    qts = tuple((re.compile(x + '''$''') for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''', None)),
        (("transformer", "wte", "embedding"), P('''mp''', None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, '''mp''')),
        (("attention", "out_proj", "kernel"), P('''mp''', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, '''mp''')),
        (("mlp", "c_fc", "bias"), P('''mp''')),
        (("mlp", "c_proj", "kernel"), P('''mp''', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
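

# Usage sketch (assumes a GPT-2-style flax parameter tree; names are illustrative):
#
#   params = model.init(rng, dummy_input)["params"]
#   spec = set_partitions(params)   # PartitionSpec pytree matching `params`
#   # `spec` can then be passed as in/out axis resources to pjit for model-parallel sharding.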
| 100 | 0 |
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
 | 706 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('transformers.models.auto'))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('bert-base-uncased')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('roberta')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, 'fake-roberta')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, 'config.json'), 'w') as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register('custom', CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('model', CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('bert', BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, 'bert-base is not a local folder and is not a valid model identifier'
        ):
            _ = AutoConfig.from_pretrained('bert-base')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'
        ):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='aaaaaa')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            'hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.',
        ):
            _ = AutoConfig.from_pretrained('hf-internal-testing/no-config-test-repo')

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not trusted, loading this dynamic config must fail.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)

        config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, 'NewModelConfig')

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, 'NewModelConfig')

    def test_new_dynamic_config_registration(self):
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register('new-model', NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model')
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, 'NewModelConfigLocal')
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('hf-internal-testing/test_dynamic_model', trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, 'NewModelConfig')
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 186 | 0 |
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """ELU activation: identity for positive inputs, alpha * (exp(x) - 1) otherwise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
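    # Worked example (illustrative; values approximate):
    #   exponential_linear_unit(np.array([2.0, 0.0, -1.0]), alpha=1.0)
    #   -> array([ 2.        ,  0.        , -0.63212056])   # e**-1 - 1 ≈ -0.632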
| 312 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
            '`TrainingArguments` instead.',
            FutureWarning,
        )
@cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info('PyTorch: setting up devices')
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                'torch.distributed process group is initialized, but local_rank == -1. '
                'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch')
        if self.no_cuda:
            device = torch.device('cpu')
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device('cuda', local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK'))
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta)
            device = torch.device('cuda', self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
 | 561 | 0 |
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def get_videomae_config(model_name):
    config = VideoMAEConfig()

    set_architecture_configs(model_name, config)

    if "finetuned" not in model_name:
        config.use_mean_pooling = False

    if "finetuned" in model_name:
        repo_id = "huggingface/label-files"
        if "kinetics" in model_name:
            config.num_labels = 400
            filename = "kinetics400-id2label.json"
        elif "ssv2" in model_name:
            config.num_labels = 174
            filename = "something-something-v2-id2label.json"
        else:
            raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.")
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    return config
def set_architecture_configs(model_name, config):
    if "small" in model_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 3
        config.decoder_hidden_size = 192
        config.decoder_intermediate_size = 768
    elif "large" in model_name:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 512
        config.decoder_intermediate_size = 2048
    elif "huge" in model_name:
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16
        config.decoder_num_hidden_layers = 12
        config.decoder_num_attention_heads = 8
        config.decoder_hidden_size = 640
        config.decoder_intermediate_size = 2560
    elif "base" not in model_name:
        raise ValueError('Model name should include either "small", "base", "large", or "huge"')
def rename_key(name):
    if "encoder." in name:
        name = name.replace("encoder.", "")
    if "cls_token" in name:
        name = name.replace("cls_token", "videomae.embeddings.cls_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "videomae.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "videomae.embeddings.norm")
    if "decoder.blocks" in name:
        name = name.replace("decoder.blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "videomae.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name and "bias" not in name:
        name = name.replace("attn", "attention.self")
    if "attn" in name:
        name = name.replace("attn", "attention.attention")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.weight", "videomae.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name and "fc" not in name:
        name = name.replace("norm.bias", "videomae.layernorm.bias")
    if "head" in name and "decoder" not in name:
        name = name.replace("head", "classifier")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key.startswith("encoder."):
            key = key.replace("encoder.", "")

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value weights.
            key_split = key.split(".")
            if key.startswith("decoder.blocks"):
                dim = config.decoder_hidden_size
                layer_num = int(key_split[2])
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                dim = config.hidden_size
                layer_num = int(key_split[1])
                prefix = "videomae.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub):
    config = get_videomae_config(model_name)

    if "finetuned" in model_name:
        model = VideoMAEForVideoClassification(config)
    else:
        model = VideoMAEForPreTraining(config)

    # download original checkpoint, hosted on Google Drive
    output = "pytorch_model.bin"
    gdown.cached_download(checkpoint_url, output, quiet=False)
    files = torch.load(output, map_location="cpu")
    if "model" in files:
        state_dict = files["model"]
    else:
        state_dict = files["module"]
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    # verify model on basic input
    image_processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
    video = prepare_video()
    inputs = image_processor(video, return_tensors="pt")

    if "finetuned" not in model_name:
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

    outputs = model(**inputs)
    logits = outputs.logits
    model_names = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([-0.9291, -0.4061, -0.9307] )
elif model_name == "videomae-small-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([0.2671, -0.4689, -0.8235] )
elif model_name == "videomae-base":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]] )
elif model_name == "videomae-base-short":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] )
# we verified the loss both for normalized and unnormalized targets for this one
snake_case__ = torch.tensor([0.5142] ) if config.norm_pix_loss else torch.tensor([0.6469] )
elif model_name == "videomae-large":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]] )
elif model_name == "videomae-large-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.0771, 0.0011, -0.3625] )
elif model_name == "videomae-huge-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.2433, 0.1632, -0.4894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.6588, 0.0990, -0.2493] )
elif model_name == "videomae-base-finetuned-kinetics":
snake_case__ = torch.Size([1, 400] )
snake_case__ = torch.tensor([0.3669, -0.0688, -0.2421] )
elif model_name == "videomae-base-short-ssv2":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([-0.0537, -0.1539, -0.3266] )
elif model_name == "videomae-base-ssv2":
snake_case__ = torch.Size([1, 1408, 1536] )
snake_case__ = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]] )
elif model_name == "videomae-base-finetuned-ssv2":
snake_case__ = torch.Size([1, 174] )
snake_case__ = torch.tensor([0.1961, -0.8337, -0.6389] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
    # verify logits
    assert logits.shape == expected_shape
    if "finetuned" in model_name:
        assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4)
    else:
        print("Logits:", logits[0, :3, :3])
        assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)
    print("Logits ok!")

    # verify loss, if applicable
    if model_name == "videomae-base-short":
        loss = outputs.loss
        assert torch.allclose(loss, expected_loss, atol=1e-4)
        print("Loss ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
        type=str,
        help=(
            "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
            " download link."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/Users/nielsrogge/Documents/VideoMAE/Test",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 708 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator() -> Iterator[int]:
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 200_0000) -> int:
    """Sum all primes strictly below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
    print(f"{solution() = }")
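    # This is Project Euler problem 10; for the default bound of two million the
    # well-known result is solution() == 142913828922. The 6k ± 1 trial division
    # in is_prime keeps the run to a few seconds at this range.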
| 99 | 0 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    '''simple docstring'''

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )

    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": MobileBertModel,
"fill-mask": MobileBertForMaskedLM,
"question-answering": MobileBertForQuestionAnswering,
"text-classification": MobileBertForSequenceClassification,
"token-classification": MobileBertForTokenClassification,
"zero-shot": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
@slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, so it is not a good idea to measure closeness with an additive tolerance.
        # Here, we instead divide the expected result by the actual result to obtain a ratio of ~1, and then check
        # that the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)
        self.assertTrue(lower_bound and upper_bound)
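

# Minimal standalone sketch (added for illustration; not part of the original test file):
# the ratio-based closeness check used in the test above. When values span many orders of
# magnitude, an additive |a - b| < atol test is meaningless, so we check that
# expected / actual stays within 1 +/- tolerance instead.
def _ratio_close(expected, actual, tolerance=1e-3):
    # e.g. _ratio_close(torch.tensor([1.0e8]), torch.tensor([1.00005e8])) is True even
    # though the absolute difference is 5000.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))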
| 305 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union
    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
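    # Added illustration: with alternative_union=True the denominator is
    # len(set_a) + len(set_b) = 11 instead of the union size 8, so the
    # score drops from 3/8 = 0.375 to 3/11 ~= 0.273.
    print(jaccard_similarity(set_a, set_b, alternative_union=True))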
| 305 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        '''simple docstring'''
        super().__init__(
            config, question_encoder_tokenizer=question_encoder_tokenizer, generator_tokenizer=generator_tokenizer, index=index, init_retrieval=False, )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        '''simple docstring'''
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        '''simple docstring'''
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        '''simple docstring'''
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        '''simple docstring'''
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states, n_docs):
        '''simple docstring'''
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
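

# Standalone sketch (added for illustration; not part of the retriever class): the
# gather -> compute-on-rank-0 -> scatter round trip that retrieve() performs, written
# for a generic tensor op. Assumes torch.distributed is already initialized, that the
# per-rank batches all have the same shape, and that `compute` preserves the leading
# dimension so the result splits evenly across ranks.
def _gather_compute_scatter(local_batch, compute, group):
    world_size = dist.get_world_size(group=group)
    is_main = dist.get_rank(group=group) == 0
    # only the destination rank allocates the gather buffers
    gather_list = [torch.empty_like(local_batch) for _ in range(world_size)] if is_main else None
    dist.gather(local_batch, dst=0, gather_list=gather_list, group=group)
    # rank 0 runs the expensive op once, then splits the result back per worker
    chunks = list(compute(torch.cat(gather_list)).chunk(world_size)) if is_main else None
    out = torch.empty_like(local_batch)
    dist.scatter(out, src=0, scatter_list=chunks, group=group)
    return out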
| 711 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        '''simple docstring'''
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        '''simple docstring'''
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        '''simple docstring'''
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        '''simple docstring'''
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        '''simple docstring'''
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        '''simple docstring'''
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        '''simple docstring'''
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1):
        '''simple docstring'''
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        '''simple docstring'''
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        '''simple docstring'''
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        '''simple docstring'''
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        '''simple docstring'''
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        '''simple docstring'''
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        '''simple docstring'''
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        '''simple docstring'''
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
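

# Illustrative note (added): squareplus(x) = (x + sqrt(x^2 + 4)) / 2 maps the reals to
# (0, inf) like softplus, but uses only algebraic operations (no exp/log) and satisfies
# squareplus(0) = 1. A quick check of both properties:
#
#   xs = torch.linspace(-5.0, 5.0, steps=11)
#   ys = (xs + torch.sqrt(torch.square(xs) + 4.0)) / 2.0
#   assert bool((ys > 0).all()) and abs(float(ys[5]) - 1.0) < 1e-6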
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        '''simple docstring'''
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        '''simple docstring'''
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        '''simple docstring'''
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
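

# Illustrative sketch (added): the "scaling property" referenced in the comment above.
# A NegativeBinomial with total_count r and logits l has mean r * exp(l), so adding
# log(s) to the logits multiplies the mean by s -- which is why `scale` enters through
# `logits += scale.log()` rather than through an AffineTransform:
#
#   d1 = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.tensor(0.3))
#   d2 = NegativeBinomial(total_count=torch.tensor(5.0), logits=torch.tensor(0.3) + torch.log(torch.tensor(2.0)))
#   assert torch.allclose(d2.mean, 2.0 * d1.mean)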
| 63 | 0 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 658 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
snake_case__ = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."})
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
| 583 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Any = {}
class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=32000, hidden_size=4096, intermediate_size=11008, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=None, hidden_act="silu", max_position_embeddings=2048, initializer_range=0.02, rms_norm_eps=1e-6, use_cache=True, pad_token_id=0, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_scaling=None, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, )

    def _rope_scaling_validation(self):
        '''simple docstring'''
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
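

# Usage sketch (added for illustration): the validation above accepts only a two-field
# dict whose `type` is "linear" or "dynamic" and whose `factor` is a float > 1:
#
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})   # passes
#   LlamaConfig(rope_scaling={"type": "xpos", "factor": 2.0})     # raises ValueError (unknown type)
#   LlamaConfig(rope_scaling={"type": "linear", "factor": 1})     # raises ValueError (not a float > 1)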
| 367 |
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 367 | 1 |
"""simple docstring"""
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
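

# Worked example (added for illustration): for nums = [1, 2, 3] the (including, excluding)
# pair evolves (1, 0) -> (0 + 2, max(1, 0)) = (2, 1) -> (1 + 3, max(2, 1)) = (4, 2),
# so the answer is max(4, 2) = 4, picking the non-adjacent elements 1 and 3.
assert maximum_non_adjacent_sum([1, 2, 3]) == 4
assert maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18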
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def get_module_path(test_file):
    '''simple docstring'''
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead.")
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.")

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    '''simple docstring'''
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    '''simple docstring'''
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    '''simple docstring'''
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    '''simple docstring'''
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    '''simple docstring'''
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    '''simple docstring'''
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    '''simple docstring'''
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    '''simple docstring'''
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
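

# Usage sketch (added for illustration): chaining the helpers above on a real test file.
# The mapping shapes are exactly what the functions build; `to_json` turns the class
# objects into their names so the result can be serialized.
#
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   model_to_tests = get_model_to_test_mapping(test_file)      # {ModelClass: [TestClass, ...]}
#   model_to_testers = get_model_to_tester_mapping(test_file)  # {ModelClass: [TesterClass, ...]}
#   print(to_json(model_to_tests))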
| 682 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 249 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".')
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
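

# Usage sketch (added for illustration) of the Conversation state machine above:
#
#   conversation = Conversation("Hi there!")        # sets new_user_input
#   conversation.mark_processed()                   # moves it into past_user_inputs
#   conversation.append_response("Hello!")          # records the bot turn
#   conversation.add_user_input("How are you?")     # queues the next user turn
#   print(conversation)                             # "user >> ..." / "bot >> ..." transcript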
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
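

# End-to-end sketch (added for illustration; the checkpoint name is an assumption, any
# conversational model works):
#
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])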
| 249 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    '''simple docstring'''
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}.")

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`.")
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.")

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`.")

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function.")

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic.") from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
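

# Usage sketch (added for illustration): from a notebook cell, after defining a training
# function that builds its own Accelerator (never in the notebook's global scope):
#
#   def training_function():
#       ...  # create the Accelerator, model and dataloaders here
#
#   notebook_launcher(training_function, args=(), num_processes=2)  # forks 2 GPU workers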
def debug_launcher(function, args=(), num_processes=2):
    '''simple docstring'''
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 614 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    '''simple docstring'''
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def output_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        '''simple docstring'''
        pass

    def test_training(self):
        '''simple docstring'''
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict)
        # clone model
        model_a.load_state_dict(model.state_dict())
        model_a.to(torch_device)
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        out_a = model_a(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_a = dict(model_a.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_a[name].grad.data, atol=5e-5))
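
    # Note (added for illustration): gradient checkpointing trades memory for compute by
    # discarding intermediate activations in the forward pass and recomputing them during
    # backward; the test above verifies the loss and every parameter gradient still match
    # the non-checkpointed run. The same mechanism in plain PyTorch looks like:
    #
    #   from torch.utils.checkpoint import checkpoint
    #   out = checkpoint(block, hidden_states, use_reentrant=False)  # block re-runs in backward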
    def test_from_pretrained_hub(self):
        '''simple docstring'''
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        '''simple docstring'''
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ])
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    '''simple docstring'''
    def get_file_format(self, seed, shape):
        '''simple docstring'''
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        '''simple docstring'''
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        '''simple docstring'''
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        image = self.get_sd_image(seed, fpaa=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fpaa=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fpaa=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
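# Note (illustrative, not part of the suite): the encode test above checks the
# SD VAE's 8x spatial compression into 4 latent channels, e.g. a batch of
# (4, 3, 512, 512) images yields latents of shape (4, 4, 64, 64):
assert [4, 4] + [i // 8 for i in (512, 512)] == [4, 4, 64, 64]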
| 614 | 1 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
_overwrite_items = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
_delete_items = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
_access_absent_items = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my_exc) == str(py_exc)
        assert set(py) == set(my)
        assert len(py) == len(my)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    # HashMap should expose no public methods beyond those of a built-in dict.
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
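# Illustrative only: how one (fun, *args) tuple from the operation tables above
# drives a mapping — _set("key_a", "val_a") unpacks to (setitem, "key_a", "val_a").
def _demo_operation_encoding() -> None:
    demo = HashMap(initial_block_size=4)
    fun, *args = _set("key_a", "val_a")
    fun(demo, *args)  # equivalent to demo["key_a"] = "val_a"
    assert _run_operation(demo, *_get("key_a")) == ("val_a", None)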
| 489 |
'''simple docstring'''
import math
def solution(n: int = 100) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
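# Worked check (illustrative): for n = 10 the square of the sum is 55**2 = 3025
# and the sum of squares is 385, so solution(10) == 2640.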
| 489 | 1 |
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
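# Worked example (illustrative): water has density ~998 kg/m^3 and bulk modulus
# ~2.15e9 Pa, giving speed_of_sound_in_a_fluid(998, 2.15e9) of roughly 1467 m/s.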
| 364 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
a : Optional[Any] = "pt"
elif is_tf_available():
a : List[Any] = "tf"
else:
a : List[Any] = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # Build a decodable sequence: single Perceiver byte IDs are not always
        # valid UTF-8 on their own, so keep only plain ASCII-letter tokens.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.perceiver_tokenizer
UpperCAmelCase : Tuple = "Unicode €."
UpperCAmelCase : int = tokenizer(snake_case )
UpperCAmelCase : Tuple = [4, 9_1, 1_1_6, 1_1_1, 1_0_5, 1_1_7, 1_0_6, 1_0_7, 3_8, 2_3_2, 1_3_6, 1_7_8, 5_2, 5]
self.assertEqual(encoded["input_ids"] , snake_case )
# decoding
UpperCAmelCase : Optional[Any] = tokenizer.decode(snake_case )
self.assertEqual(snake_case , "[CLS]Unicode €.[SEP]" )
UpperCAmelCase : Tuple = tokenizer("e è é ê ë" )
UpperCAmelCase : str = [4, 1_0_7, 3_8, 2_0_1, 1_7_4, 3_8, 2_0_1, 1_7_5, 3_8, 2_0_1, 1_7_6, 3_8, 2_0_1, 1_7_7, 5]
self.assertEqual(encoded["input_ids"] , snake_case )
# decoding
UpperCAmelCase : Dict = tokenizer.decode(snake_case )
self.assertEqual(snake_case , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.perceiver_tokenizer
UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
UpperCAmelCase : List[str] = [4, 7_1, 3_8, 1_1_4, 1_1_7, 1_1_6, 1_0_9, 3_8, 1_1_8, 1_0_3, 1_2_0, 1_0_3, 1_0_9, 1_2_0, 1_0_3, 1_1_8, 1_1_0, 3_8, 1_0_8, 1_1_7, 1_2_0, 3_8, 1_2_1, 1_2_3, 1_1_5, 1_1_5, 1_0_3, 1_2_0, 1_1_1, 1_2_8, 1_0_3, 1_2_2, 1_1_1, 1_1_7, 1_1_6, 5_2, 5, 0]
# fmt: on
UpperCAmelCase : Dict = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
self.assertIsInstance(snake_case , snake_case )
if FRAMEWORK != "jax":
UpperCAmelCase : List[Any] = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase : str = list(batch.input_ids.tolist()[0] )
self.assertListEqual(snake_case , snake_case )
self.assertEqual((2, 3_8) , batch.input_ids.shape )
self.assertEqual((2, 3_8) , batch.attention_mask.shape )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.perceiver_tokenizer
UpperCAmelCase : Tuple = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase : List[Any] = tokenizer(snake_case , padding=snake_case , return_tensors=snake_case )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , snake_case )
self.assertIn("attention_mask" , snake_case )
self.assertNotIn("decoder_input_ids" , snake_case )
self.assertNotIn("decoder_attention_mask" , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.perceiver_tokenizer
UpperCAmelCase : int = [
"Summary of the text.",
"Another summary.",
]
UpperCAmelCase : List[Any] = tokenizer(
text_target=snake_case , max_length=3_2 , padding="max_length" , truncation=snake_case , return_tensors=snake_case )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
UpperCAmelCase : Tuple = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : Dict = tempfile.mkdtemp()
UpperCAmelCase : Any = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
UpperCAmelCase : List[str] = tokenizer.__class__.from_pretrained(snake_case )
UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
shutil.rmtree(snake_case )
UpperCAmelCase : Dict = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase : str = tempfile.mkdtemp()
UpperCAmelCase : int = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
UpperCAmelCase : int = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
UpperCAmelCase : Optional[Any] = tokenizer.__class__.from_pretrained(snake_case )
UpperCAmelCase : Union[str, Any] = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
UpperCAmelCase : Optional[int] = tokenizer.__class__.from_pretrained(snake_case , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(snake_case )
with open(os.path.join(snake_case , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase : Union[str, Any] = json.load(snake_case )
with open(os.path.join(snake_case , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase : Any = json.load(snake_case )
UpperCAmelCase : str = [f"<extra_id_{i}>" for i in range(1_2_5 )]
UpperCAmelCase : List[Any] = added_tokens_extra_ids + [
"an_additional_special_token"
]
UpperCAmelCase : List[str] = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(snake_case , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case , snake_case )
with open(os.path.join(snake_case , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(snake_case , snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase : Optional[Any] = tokenizer_class.from_pretrained(
snake_case , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase : Optional[int] = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=snake_case )]
UpperCAmelCase : Optional[int] = tokenizer_class.from_pretrained(
snake_case , additional_special_tokens=snake_case , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : int = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_7_8] ) , "�" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.get_tokenizers(fast=snake_case , do_lower_case=snake_case )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
UpperCAmelCase : List[Any] = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
UpperCAmelCase : int = tokenizer.convert_tokens_to_string(snake_case )
self.assertIsInstance(snake_case , snake_case )
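# Background note (illustrative): PerceiverTokenizer works on raw UTF-8 bytes
# with a fixed offset for its special tokens. The euro sign in "Unicode €."
# is the byte triple (226, 130, 172), which appears in the expected IDs above
# as (232, 136, 178) — each shifted by the 6 special-token IDs — and decoding
# the lone continuation byte ID 178 yields the replacement character "�".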
| 679 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
lowerCAmelCase__ = TypeVar('''U''')
class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None
def __repr__( self : List[Any] ):
"""simple docstring"""
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)
    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear
    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node
class LRUCache(Generic[T, U]):
    # class variable mapping each decorated function to its LRUCache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}
    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : List[Any] ):
"""simple docstring"""
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
    def __contains__(self, key: T) -> bool:
"""simple docstring"""
return key in self.cache
    def get(self, key: T) -> U | None:
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None
    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)
@classmethod
    def decorator(cls, size: int = 128):
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
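# Usage sketch (illustrative): the class-level decorator above memoizes a
# one-argument function and attaches `cache_info` to the wrapper.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num < 2:
        return num
    return fib(num - 1) + fib(num - 2)

# fib(30) == 832040; fib.cache_info() then reports the hit/miss counts.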
| 700 |
"""simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    # Project Euler 63: count n-digit positive integers that are also nth powers.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 598 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase_ = logging.get_logger(__name__)
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = ['pixel_values']
def __init__( self : Optional[Any] ,_snake_case : bool = True ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = PILImageResampling.BILINEAR ,_snake_case : bool = True ,_snake_case : Union[int, float] = 1 / 255 ,_snake_case : bool = True ,_snake_case : Dict[str, int] = None ,_snake_case : bool = True ,**_snake_case : List[Any] ,) -> None:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase__ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
lowercase__ : Any = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
lowercase__ : str = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
lowercase__ : Dict = do_resize
lowercase__ : str = size
lowercase__ : Any = resample
lowercase__ : Optional[int] = do_rescale
lowercase__ : List[str] = rescale_factor
lowercase__ : Union[str, Any] = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : List[str] = do_flip_channel_order
def UpperCAmelCase ( self : str ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : PILImageResampling = PIL.Image.BILINEAR ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Optional[Any] ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase__ : List[Any] = get_resize_output_image_size(__SCREAMING_SNAKE_CASE ,size=size['''shortest_edge'''] ,default_to_square=__SCREAMING_SNAKE_CASE )
return resize(__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : np.ndarray ,_snake_case : Dict[str, int] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : List[str] ,) -> np.ndarray:
"""simple docstring"""
lowercase__ : int = get_size_dict(__SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__SCREAMING_SNAKE_CASE ,size=(size['''height'''], size['''width''']) ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self : Tuple ,_snake_case : np.ndarray ,_snake_case : Union[int, float] ,_snake_case : Optional[Union[str, ChannelDimension]] = None ,**_snake_case : Tuple ,) -> Any:
"""simple docstring"""
return rescale(__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self : List[str] ,_snake_case : np.ndarray ,_snake_case : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(__SCREAMING_SNAKE_CASE ,data_format=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self : Dict ,_snake_case : ImageInput ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : PILImageResampling = None ,_snake_case : bool = None ,_snake_case : float = None ,_snake_case : bool = None ,_snake_case : Dict[str, int] = None ,_snake_case : bool = None ,_snake_case : Optional[Union[str, TensorType]] = None ,_snake_case : ChannelDimension = ChannelDimension.FIRST ,**_snake_case : Optional[Any] ,) -> PIL.Image.Image:
"""simple docstring"""
lowercase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowercase__ : Tuple = resample if resample is not None else self.resample
lowercase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ : Optional[Any] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase__ : Union[str, Any] = size if size is not None else self.size
lowercase__ : int = get_size_dict(__SCREAMING_SNAKE_CASE ,default_to_square=__SCREAMING_SNAKE_CASE )
lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
lowercase__ : List[str] = get_size_dict(__SCREAMING_SNAKE_CASE ,param_name='''crop_size''' )
lowercase__ : Union[str, Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
if not valid_images(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
lowercase__ : str = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
lowercase__ : Optional[int] = [self.resize(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ,resample=__SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
lowercase__ : Tuple = [self.center_crop(image=__SCREAMING_SNAKE_CASE ,size=__SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Union[str, Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE ,scale=__SCREAMING_SNAKE_CASE ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase__ : Dict = [self.flip_channel_order(image=__SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Any = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = {'''pixel_values''': images}
return BatchFeature(data=__SCREAMING_SNAKE_CASE ,tensor_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self : Any ,_snake_case : Dict ,_snake_case : List[Tuple] = None ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(__SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = target_sizes.numpy()
lowercase__ : List[Any] = []
for idx in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase__ : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='''bilinear''' ,align_corners=__SCREAMING_SNAKE_CASE )
lowercase__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = logits.argmax(dim=1 )
lowercase__ : Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
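# Usage sketch (illustrative; the class above is MobileViTImageProcessor
# upstream, here obfuscated as `__A`). With the defaults above, one RGB PIL
# image is resized to a 224 shortest edge, center-cropped to 256x256, rescaled
# by 1/255 and flipped to BGR:
# processor = __A()
# batch = processor(images=image, return_tensors="pt")
# batch["pixel_values"].shape  # -> torch.Size([1, 3, 256, 256])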
| 560 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : int ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
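# Tiny worked example (illustrative): a root of 10 with children 5 and -3 sums
# to 12 when the tree is iterated.
def _demo_tree_sum() -> None:
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    assert list(BinaryTreeNodeSum(root)) == [12]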
| 68 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["OwlViTFeatureExtractor"]
__magic_name__ = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
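# Note (illustrative): with the _LazyModule registration above, submodules are
# imported only on first attribute access, e.g.
# `from transformers.models.owlvit import OwlViTProcessor` pulls in
# processing_owlvit and nothing else.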
| 248 |
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k ± 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(n: int = 2_000_000) -> int:
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"""{solution() = }""")
| 248 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self) -> None:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels, pixel_labels
def __snake_case( self : List[str] ) -> Any:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __snake_case( self : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTVaModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case( self : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case( self : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
SCREAMING_SNAKE_CASE = model(_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE = model(_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __snake_case( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__ : List[str] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase__ : Optional[Any] = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase__ : Optional[int] = False
lowercase__ : int = False
lowercase__ : Optional[int] = False
lowercase__ : Optional[Any] = False
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTVaModelTester(self )
SCREAMING_SNAKE_CASE = MobileViTVaConfigTester(self , config_class=_UpperCamelCase , has_text_modality=_UpperCamelCase )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def __snake_case( self : Dict ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def __snake_case( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def __snake_case( self : Any ) -> Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def __snake_case( self : Tuple ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __snake_case( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
def __snake_case( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def __snake_case( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(_UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ):
SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 5
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE = 2
for i in range(len(_UpperCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __snake_case( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCamelCase )
def __snake_case( self : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCamelCase )
@slow
def __snake_case( self : Dict ) -> Tuple:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = MobileViTVaModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
@cached_property
def __snake_case( self : Any ) -> Any:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(_UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def __snake_case( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
SCREAMING_SNAKE_CASE = model.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_UpperCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
@slow
def __snake_case( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
SCREAMING_SNAKE_CASE = model.to(_UpperCamelCase )
SCREAMING_SNAKE_CASE = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=_UpperCamelCase , return_tensors="pt" ).to(_UpperCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
SCREAMING_SNAKE_CASE = image_processor.post_process_semantic_segmentation(outputs=_UpperCamelCase )
SCREAMING_SNAKE_CASE = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _UpperCamelCase )
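# Note (illustrative): make_divisible, used by the model tester above, rounds
# channel counts to a multiple of `divisor`; in the usual implementation
# make_divisible(512 * 0.25, divisor=8) stays 128, while a non-aligned value
# such as make_divisible(100, divisor=8) becomes 104.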
| 403 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"
def __init__( self : str , _UpperCamelCase : Dict=250_002 , _UpperCamelCase : List[Any]=1_024 , _UpperCamelCase : str=24 , _UpperCamelCase : str=16 , _UpperCamelCase : Tuple=4_096 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Dict=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : int=514 , _UpperCamelCase : int=1 , _UpperCamelCase : Dict=0.0_2 , _UpperCamelCase : Union[str, Any]=0.0_2 , _UpperCamelCase : Union[str, Any]=1e-05 , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : Any=0 , _UpperCamelCase : Tuple=2 , _UpperCamelCase : Optional[Any]="absolute" , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Union[str, Any]=768 , **_UpperCamelCase : Optional[int] , ) -> Any:
'''simple docstring'''
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = initializer_factor
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = position_embedding_type
SCREAMING_SNAKE_CASE = use_cache
SCREAMING_SNAKE_CASE = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"
def __init__( self : Dict , _UpperCamelCase : Optional[int]=768 , _UpperCamelCase : Optional[int]=3_072 , _UpperCamelCase : List[Any]=512 , _UpperCamelCase : Union[str, Any]=12 , _UpperCamelCase : List[Any]=12 , _UpperCamelCase : int=3 , _UpperCamelCase : List[Any]=224 , _UpperCamelCase : Optional[int]=32 , _UpperCamelCase : Optional[Any]="quick_gelu" , _UpperCamelCase : int=1e-5 , _UpperCamelCase : str=0.0 , _UpperCamelCase : int=0.0_2 , _UpperCamelCase : Any=1.0 , **_UpperCamelCase : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = projection_dim
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_size
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = initializer_factor
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = hidden_act
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True
def __init__( self : int , _UpperCamelCase : Tuple=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=768 , _UpperCamelCase : int=2.6_5_9_2 , **_UpperCamelCase : int ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = kwargs.pop("text_config_dict" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = kwargs.pop("vision_config_dict" , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
SCREAMING_SNAKE_CASE = {}
# This is the complete result when using `text_config_dict`.
SCREAMING_SNAKE_CASE = AltCLIPTextConfig(**_UpperCamelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
SCREAMING_SNAKE_CASE = (
F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
F"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
SCREAMING_SNAKE_CASE = (
F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
F"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(_UpperCamelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        """Instantiate an AltCLIP config from a text model config and a vision model config."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serializes this instance to a Python dictionary, expanding the nested config objects."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
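# A minimal usage sketch (assuming the surrounding file also defines
# `AltCLIPTextConfig`, `logger`, and the usual imports, as in `transformers`):
#
#   config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
#   assert config.to_dict()["model_type"] == "altclip"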
| 403 | 1 |
def binomial_coefficient(n: int, k: int) -> int:
    """Compute C(n, k) iteratively, without large intermediate factorials."""
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Return the number of possible binary search trees with `node_count` nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    """Return n! for a non-negative integer n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Return the number of possible labeled binary trees with `node_count` nodes."""
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
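# Worked example: for node_count = 5,
#   catalan_number(5) = C(10, 5) // 6 = 252 // 6 = 42 binary search trees, and
#   binary_tree_count(5) = 42 * 5! = 42 * 120 = 5040 labeled binary trees.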
| 715 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCamelCase = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = UpperCAmelCase_  # the reference encoding kept verbatim above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 589 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Apply black to a code sample, wrapping indented blocks in a dummy class first."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_copies(args.fix_and_overwrite)
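# Example of the marker this script validates (shown as a nested comment so it
# is not parsed itself; the grammar is fixed by `_re_copy_warning` and
# `_re_replace_pattern` above, and the object name is only illustrative):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# `is_copy_consistent` re-fetches the referenced object, applies the
# `old->new` renames, runs black, and diffs the result against the code that
# follows the marker.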
| 659 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Swinv2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
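# Quick arithmetic check of the derived `hidden_size` (a sketch, not part of
# the original file): with the defaults embed_dim=96 and depths=[2, 2, 6, 2],
# hidden_size = int(96 * 2 ** (4 - 1)) = 96 * 8 = 768.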
| 659 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
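# Example invocations (illustrative; the subcommand names come from the
# register_subcommand calls above, e.g. "env", "download", "convert", "run"):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased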
| 14 |
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
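# Shape sanity check for the q/k/v splits above (a sketch, not from the
# original script): both the Swin qkv Linear and PyTorch's nn.MultiheadAttention
# pack the three projections as a single in_proj_weight of shape
# (3 * hidden_size, hidden_size), so the slices [:hidden_size],
# [hidden_size : 2 * hidden_size] and [-hidden_size:] recover the query, key
# and value weights in that order.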
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65_535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""maskformer-swin-tiny-ade""",
type=str,
help=("""Name of the MaskFormer model you'd like to convert""",),
)
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M) * N
        # initially all values are set to -1
        self.dp = [[-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
A_ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
A_ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
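# Worked example for the input above: person 0 can take tasks {1, 3, 4},
# person 1 {1, 2, 5}, person 2 {3, 4}, and each person needs a distinct task,
# so the valid matchings enumerate to 10, which is the value printed.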
| 391 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher using ordinal arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Encode/decode `sequence` with the Atbash cipher using a lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
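# Worked example of the ordinal arithmetic in atbash_slow: ord('A') = 65 and
# ord('Z') = 90, so chr(155 - ord('A')) == 'Z'; likewise chr(219 - ord('a')) == 'z'.
# Hence atbash("ABCDEFGH") == "ZYXWVUTS", and applying the cipher twice is the identity.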
| 391 | 1 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
lowercase = get_args()
main(args)
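# Example end-to-end invocation (illustrative; every flag is defined in
# get_args above, and the model name is just a plausible RAG checkpoint):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path e2e_preds.txt \
#       --eval_mode e2e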
| 564 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()
class __lowerCamelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def a_ ( a__ ):
__SCREAMING_SNAKE_CASE : int = parser.add_parser("env" )
download_parser.set_defaults(func=a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Any = huggingface_hub.__version__
__SCREAMING_SNAKE_CASE : Dict = "not installed"
__SCREAMING_SNAKE_CASE : Dict = "NA"
if is_torch_available():
import torch
__SCREAMING_SNAKE_CASE : List[str] = torch.__version__
__SCREAMING_SNAKE_CASE : List[str] = torch.cuda.is_available()
__SCREAMING_SNAKE_CASE : Optional[Any] = "not installed"
if is_transformers_available():
import transformers
__SCREAMING_SNAKE_CASE : str = transformers.__version__
__SCREAMING_SNAKE_CASE : List[str] = "not installed"
if is_accelerate_available():
import accelerate
__SCREAMING_SNAKE_CASE : List[str] = accelerate.__version__
__SCREAMING_SNAKE_CASE : Union[str, Any] = "not installed"
if is_xformers_available():
import xformers
__SCREAMING_SNAKE_CASE : List[Any] = xformers.__version__
__SCREAMING_SNAKE_CASE : int = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(a__ ) )
return info
@staticmethod
def a_ ( a__ ):
return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 564 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_megatron_bert"] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
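
# Illustrative check (run this file directly): thanks to the `_LazyModule`
# indirection above, the public surface can be listed without importing torch.
if __name__ == "__main__":
    print(sorted(name for names in _import_structure.values() for name in names))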
| 47 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
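
# Smoke-test sketch with an illustrative tiny configuration and random weights
# (assumes the sibling Flax down/mid blocks import correctly; not a real
# checkpoint):
if __name__ == "__main__":
    tiny = FlaxControlNetModel(
        sample_size=16,
        block_out_channels=(8, 8),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        attention_head_dim=2,
        cross_attention_dim=8,
        layers_per_block=1,
    )
    params = tiny.init_weights(jax.random.PRNGKey(0))
    print("initialized", len(params), "top-level parameter groups")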
| 625 | 0 |
"""simple docstring"""
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k
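
# Worked example (hypothetical TF variable name) of the replacement chain above:
# slashes become dots, `r.layer_N` becomes `r.layers.N`, `ffn.dense.` becomes
# `fc1.`, and `kernel` becomes `weight`.
if __name__ == "__main__":
    assert rename_state_dict_key("encoder/layer_0/ffn/dense/kernel") == "encoder.layers.0.fc1.weight"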
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def lowerCamelCase_(__SCREAMING_SNAKE_CASE="./ckpt/aeslc/model.ckpt-32000" )-> Dict:
_SCREAMING_SNAKE_CASE : Dict = tf.train.list_variables(__A )
_SCREAMING_SNAKE_CASE : str = {}
_SCREAMING_SNAKE_CASE : Union[str, Any] = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(__A , desc="""converting tf checkpoint to dict""" ):
_SCREAMING_SNAKE_CASE : int = any(pat in name for pat in ignore_name )
if skip_key:
continue
_SCREAMING_SNAKE_CASE : List[Any] = tf.train.load_variable(__A , __A )
_SCREAMING_SNAKE_CASE : Tuple = array
return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"""summarization_{dataset}"""]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowerCAmelCase_ = parser.parse_args()
if args.save_dir is None:
lowerCAmelCase_ = Path(args.tf_ckpt_path).parent.name
lowerCAmelCase_ = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 708 | """simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"""This example is {label}""" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
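
# Usage sketch (downloads the bart-large-mnli checkpoint on first call, so
# network access is assumed):
if __name__ == "__main__":
    classifier = TextClassificationTool()
    print(classifier("This new API is a joy to use!", labels=["positive", "negative"]))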
| 635 | 0 |
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
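
# Usage sketch (requires network access on first call; "rotten_tomatoes" is
# just an illustrative public dataset):
if __name__ == "__main__":
    print(load_dataset("rotten_tomatoes", split="train[:10]"))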
| 96 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """simple docstring"""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
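
# Equivalent memoized formulation (a cross-checking sketch, not part of the
# original solution) of the same fill-count recurrence used by `solution`.
def fill_count_recursive(min_block_length: int, n: int) -> int:
    from functools import lru_cache

    @lru_cache(maxsize=None)
    def ways(k: int) -> int:
        if k < min_block_length:
            return 1  # too short for any block: only the all-black row
        total = 1  # the all-black row
        for block_length in range(min_block_length, k + 1):
            for block_start in range(k - block_length):
                total += ways(k - block_start - block_length - 1)
            total += 1  # the block flush against the right edge
        return total

    return ways(n)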
if __name__ == "__main__":
print(F"""{solution() = }""")
| 563 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 718 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
if "token_type_ids" not in tokenizer.model_input_names:
a__ = None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """PyTorch dataset wrapping cached token-classification features."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset wrapping token-classification features."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
a__ = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
a__ = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
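
# Minimal sketch (hypothetical CoNLL-style reader, not shipped with this module)
# showing how the abstract hooks of `TokenClassificationTask` are typically
# filled in: one token + label per line, blank lines between sentences.
class _ExampleNERTask(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        mode_str = mode.value if isinstance(mode, Split) else mode
        examples, words, labels = [], [], []
        guid_index = 1
        with open(os.path.join(data_dir, f"{mode_str}.txt"), encoding="utf-8") as f:
            for line in f:
                line = line.rstrip("\n")
                if not line:
                    if words:
                        examples.append(InputExample(guid=f"{mode_str}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words, labels = [], []
                else:
                    splits = line.split()
                    words.append(splits[0])
                    labels.append(splits[-1] if len(splits) > 1 else "O")
        if words:
            examples.append(InputExample(guid=f"{mode_str}-{guid_index}", words=words, labels=labels))
        return examples

    @staticmethod
    def get_labels(path):
        with open(path, encoding="utf-8") as f:
            return f.read().splitlines()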
| 148 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'speechbrain/m-ctc-t-large': 'https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
F'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
F'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
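
# Usage sketch: instantiate with defaults and round-trip through JSON (the
# list() conversions above are what keep the config exportable).
if __name__ == "__main__":
    cfg = MCTCTConfig()
    assert len(cfg.conv_kernel) == cfg.num_conv_layers
    print(cfg.to_json_string()[:80], "...")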
| 278 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args


def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records


def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)


if __name__ == "__main__":
    args = parse_args()
    main(args)
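
# Read-back sketch (assumes a shard produced by `main` above): parse one
# serialized example to verify the feature spec matches what was written.
def _read_one_example(tfrecord_path, max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    raw = next(iter(tf.data.TFRecordDataset(tfrecord_path)))
    return tf.io.parse_single_example(raw, feature_spec)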
| 278 | 1 |
'''simple docstring'''
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self) -> None:
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self) -> None:
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self) -> None:
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self) -> None:
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self) -> None:
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self) -> None:
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self) -> None:
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self) -> None:
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self) -> None:
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component_vector(self) -> None:
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self) -> None:
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self) -> None:
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
if __name__ == "__main__":
unittest.main()
| 418 |
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
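
# Quick sanity check (worked small case): the largest fraction below 3/7 with
# denominator <= 8 is 2/5, so the returned numerator should be 2.
if __name__ == "__main__":
    assert solution(numerator=3, denominator=7, limit=8) == 2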
| 418 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 8 |
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 212 | 0 |
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """simple docstring"""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """simple docstring"""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
| 709 |
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Fetch the current weather for a location."""
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Fetch the 5-day weather forecast for a location."""
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Fetch the one-call weather report for a pair of coordinates."""
    return requests.get(URL_BASE + "onecall", params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break | 182 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Calculate the builtin voltage of a pn junction."""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
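
# Worked example (illustrative silicon-like numbers, not from the original
# file): Nd = 1e17, Na = 1e17 and ni = 1.5e10 give a builtin voltage of
# roughly 0.81 V at T = 300 K.
if __name__ == "__main__":
    print(f"{builtin_voltage(1e17, 1e17, 1.5e10):.3f} V")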
| 566 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 566 | 1 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Any ):
lowercase = GPTNeoXModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = model(snake_case__ , attention_mask=snake_case__ )
lowercase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ):
lowercase = True
lowercase = GPTNeoXModel(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
lowercase = GPTNeoXForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any ):
lowercase = self.num_labels
lowercase = GPTNeoXForQuestionAnswering(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = model(snake_case__ , attention_mask=snake_case__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : str ):
lowercase = self.num_labels
lowercase = GPTNeoXForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Any , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Any , snake_case__ : int ):
lowercase = self.num_labels
lowercase = GPTNeoXForTokenClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Any ):
lowercase = True
lowercase = GPTNeoXForCausalLM(config=snake_case__ )
model.to(snake_case__ )
model.eval()
# first forward pass
lowercase = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
lowercase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase = model(snake_case__ , attention_mask=snake_case__ , output_hidden_states=snake_case__ )
lowercase = output_from_no_past["hidden_states"][0]
lowercase = model(
snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ , output_hidden_states=snake_case__ , )["hidden_states"][0]
# select random slice
lowercase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowercase = self.prepare_config_and_inputs()
lowercase = config_and_inputs
lowercase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))


@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
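

# Minimal usage sketch (editorial example, not part of the original test suite): the slow
# test above boils down to this greedy-decoding round trip. Assumes network access to the
# `EleutherAI/pythia-410m-deduped` checkpoint on the Hub.
def _example_pythia_generation():
    tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
    model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")
    inputs = tokenizer("My favorite food is", return_tensors="pt")
    # do_sample=False makes generation deterministic (greedy decoding)
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
    return tokenizer.batch_decode(output_ids)[0]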
| 714 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
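

# Minimal sketch of the training-loop pattern these tests verify (assumes `model`,
# `optimizer` and `dataloader` came out of `accelerator.prepare` and the Accelerator was
# built with `gradient_accumulation_steps` > 1). Inside `accelerator.accumulate(model)`,
# gradient synchronization and the effective optimizer step only happen every
# `gradient_accumulation_steps` batches.
def _example_accumulate_loop(accelerator, model, optimizer, dataloader):
    for batch in dataloader:
        input, target = batch.values()
        with accelerator.accumulate(model):
            output = model(input)
            loss = F.mse_loss(output, target.to(output.device))
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()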
| 72 | 0 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
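

# Minimal inference sketch for the pipeline exercised above (assumes the
# `CompVis/ldm-celebahq-256` weights can be downloaded; with the default output type the
# pipeline returns PIL images).
def _example_ldm_sample():
    pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
    generator = torch.manual_seed(0)
    return pipe(generator=generator, num_inference_steps=5).images[0]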
| 272 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetV1Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MobileNetV1ForImageClassification, MobileNetV1Model
    from transformers.models.mobilenet_v1.modeling_mobilenet_v1 import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor
class MobileNetV1ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV1ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV1Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV1Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV1ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV1ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV1 does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (MobileNetV1Model, MobileNetV1ForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetV1Model, "image-classification": MobileNetV1ForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV1ModelTester(self)
        self.config_tester = MobileNetV1ConfigTester(self, config_class=MobileNetV1Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV1Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV1ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
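

# Minimal classification sketch mirroring the integration test above (assumes network
# access to the `google/mobilenet_v1_1.0_224` checkpoint; `image` is any PIL image).
def _example_classify(image):
    image_processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # 1001 classes: index 0 is an extra "background" class on top of the 1000 ImageNet labels
    return model.config.id2label[logits.argmax(-1).item()]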
| 388 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
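

if __name__ == "__main__":
    # Minimal sketch (illustrative): the defaults above correspond to the released
    # `abeja/gpt-neox-japanese-2.7b` checkpoint, so a bare config reproduces its geometry.
    config = GPTNeoXJapaneseConfig()
    print(config.hidden_size, config.num_hidden_layers, config.num_attention_heads)  # 2560 32 32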
| 559 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer norm that only scales and does not shift (RMS normalization),
        # so the variance is computed without subtracting the mean and there is no bias.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    """
    Implementation of the GELU activation function currently in the Google BERT repo
    (identical to OpenAI GPT).
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    """
    FiLM layer: feature-wise linear modulation of `x` by a conditioning embedding.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
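

# Minimal sketch of the FiLM conditioning used throughout this decoder (illustrative
# shapes only): the conditioning embedding is projected to per-channel `scale` and
# `shift`, then applied as x * (1 + scale) + shift.
def _example_film():
    film = T5FiLMLayer(in_features=16, out_features=4)
    x = torch.randn(2, 3, 4)  # (batch, seq, d_model)
    conditioning_emb = torch.randn(2, 1, 16)  # (batch, 1, d_model * 4)
    return film(x, conditioning_emb)  # broadcasts to the same shape as x: (2, 3, 4)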
| 559 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream tasks it's not needed
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
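
# Example invocation (assumes the default MSN ViT-S/16 checkpoint URL is reachable):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small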
| 513 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """
    Returns a prime number generator using the sieve method.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """
    Returns the least value of n for which the remainder first exceeds the given limit.
    """
    primes = sieve()

    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
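

# Worked check (illustrative): for odd n, (p - 1)**n + (p + 1)**n is congruent to
# 2*n*p modulo p**2 whenever 2*n*p < p**2, which holds near the answer. E.g. for n = 5
# and the 5th prime p = 11, the remainder is 110 == 2 * 5 * 11 -- exactly the quantity
# `solution` compares against the limit.
def _remainder(n: int, prime: int) -> int:
    return ((prime - 1) ** n + (prime + 1) ** n) % (prime**2)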
if __name__ == "__main__":
    print(solution())
| 513 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
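if __name__ == "__main__":
    # Usage sketch (added for illustration; assumes network access to the
    # Hugging Face Hub and that the referenced checkpoint exists). The fast
    # tokenizer behaves like any other `PreTrainedTokenizerFast`.
    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    encoding = tokenizer("ELECTRA trains a discriminator over replaced tokens.")
    print(encoding.input_ids)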
| 718 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise the PyTorch model from the json config
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 50 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
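if __name__ == "__main__":
    # Minimal sketch (added for illustration): `attribute_map` lets the generic
    # names resolve to GPT-J's `n_*` fields, and the ONNX config exposes the
    # declared input spec. The tiny hyperparameters are arbitrary assumptions.
    config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
    onnx_config = GPTJOnnxConfig(config, task="default")
    print(config.num_hidden_layers, config.num_attention_heads)
    print(dict(onnx_config.inputs))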
| 430 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0  # rescale from [0, 1] to [-1, 1]
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        # binarize the mask
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
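if __name__ == "__main__":
    # Usage sketch (added for illustration; the model id and image paths are
    # assumptions). RePaint repeatedly noises/denoises the sample while the
    # scheduler keeps the known (unmasked) pixels fixed.
    import PIL.Image

    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
    original = PIL.Image.open("celeba_hq_256.png")
    mask = PIL.Image.open("mask_256.png")
    output = pipe(image=original, mask_image=mask, num_inference_steps=250)
    output.images[0].save("inpainted.png")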
| 333 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
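# Example invocation (added for illustration; the script name and the model
# identifiers are assumptions, not part of this file):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --dest ./rag-sequence-consolidated \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base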
| 333 | 1 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
    MBart50Tokenizer,
    MBartConfig,
    MBartForCausalLM,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
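# Tiny illustration (added; hypothetical shapes): the helper above builds an
# output projection that shares storage with the embedding matrix, which is how
# a decoder LM head can be tied to `decoder.embed_tokens`:
#
#     emb = nn.Embedding(10, 4)
#     head = make_linear_from_emb(emb)
#     assert head.weight.data_ptr() == emb.weight.data_ptr()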
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq checkpoint weights into the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1_024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250_004, type=int, help='''`decoder_start_token_id` of model config''')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 677 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 161 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy beam dataset with nested features."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)


def get_test_dummy_examples():
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]


def get_test_nested_examples():
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]


class BeamBuilderTest(TestCase):
    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet

        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 231 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85,
7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77,
13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11,
46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86,
1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91,
1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09,
3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73,
8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27,
32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47,
72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93,
1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75,
2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65,
4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4,
        decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True,
        is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False,
        max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256,
        eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0,
        mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
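if __name__ == "__main__":
    # Minimal sketch (added for illustration): instantiate the default config
    # and inspect the declared ONNX inputs. With `use_past` disabled, only the
    # encoder features and decoder ids are declared.
    config = WhisperConfig()
    onnx_config = WhisperOnnxConfig(config, task="default")
    print(config.d_model, config.encoder_layers)
    print(list(onnx_config.inputs.keys()))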
| 231 | 1 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 355 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
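    # Added illustration: threshold the fitted probabilities at 0.5 and report
    # training accuracy on the same two-feature iris split used above.
    predictions = (predict_prob(x) >= 0.5).astype(int)
    print("train accuracy:", (predictions == y).mean())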
| 355 | 1 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
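# Hypothetical input/output sketch for analyze_results (not part of the original file):
#   analyze_results({"none": ["BertModel", "BertConfig"]}, {"none": ["BertModel"]})
# would return:
#   ["Differences for base imports:", "  BertConfig in _import_structure but not in TYPE_HINT."]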
def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
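# For example (illustrative paths): a top-level file "trainer.py" maps to the submodule "trainer"
# and is collected, while "models/bert/modeling_bert.py" maps to "models.bert.modeling_bert" and is
# skipped by the depth-1 filter on files; the "models/bert" folder itself is collected as "models.bert".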
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    """Check all submodules are properly registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 710 | '''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """
    Calculate the net present value of a list of cash flows, discounting the cash flow at
    index i by (1 + discount_rate) ** i (i.e. the first cash flow occurs at time 0).
    """
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
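# Worked example (a sketch, not in the original file): with a 25% discount rate the first
# cash flow is discounted by (1 + 0.25) ** 0 = 1 and the second by 1.25, so
#   present_value(0.25, [-100.0, 125.0]) == round(-100.0 + 125.0 / 1.25, 2) == 0.0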
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
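# To run only these slow integration checks locally (a sketch; assumes a Transformers checkout
# with pytest installed -- slow tests are skipped unless RUN_SLOW is set):
#   RUN_SLOW=1 pytest tests/models/nezha/test_modeling_nezha.py -k "IntegrationTest" -v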
| 18 | """simple docstring"""
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map label strings from ImageNet to corresponding class ids."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
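# Minimal usage sketch (assumes the public "facebook/DiT-XL-2-256" checkpoint and a CUDA device;
# the label names below are illustrative ImageNet classes):
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]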
| 425 | 0 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
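# Usage sketch (assumes a `datasets.Dataset` named `ds`; `with_format` routes through this
# formatter so columns come back as `jax.Array` values):
#   import jax
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   batch = ds[:2]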
| 85 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
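# Usage sketch (illustrative values; BigBird falls back to full attention when sequences are
# too short for the sparse pattern):
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   assert config.attention_type == "block_sparse"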
| 85 | 1 |