from ... import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a NEZHA model."""

    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
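
# Illustrative usage sketch (not part of the original file) -- constructing the
# config defined above and reading back a couple of fields:
#
#     config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
#     config.model_type              # "nezha"
#     config.max_relative_position   # 64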
def heaps(arr: list) -> list:
    """Return all permutations of ``arr`` using Heap's algorithm (iterative form)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        # c acts as a stack of loop counters, one per position
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
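For a quick check: heaps([1, 2, 3]) returns all 3! = 6 permutations in Heap's swap order: [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)].
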
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a comet model
    # mock load_from_checkpoint which is supposed to load that model from disk
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
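
# Registering a patcher for a new metric follows the same decorator pattern; a
# hypothetical sketch (the metric name and patch target below are illustrative,
# not real packages):
#
#     @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#     def patch_my_metric(module_name):
#         with patch("my_metric_package.expensive_forward_pass") as mock_fn:
#             mock_fn.return_value = 0.5  # canned score instead of a real model call
#             yield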
"""simple docstring"""
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"kwargs, expected" , [
({"num_shards": 0, "max_num_jobs": 1}, []),
({"num_shards": 10, "max_num_jobs": 1}, [range(10 )]),
({"num_shards": 10, "max_num_jobs": 10}, [range(__lowercase , i + 1 ) for i in range(10 )]),
({"num_shards": 1, "max_num_jobs": 10}, [range(1 )]),
({"num_shards": 10, "max_num_jobs": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"num_shards": 3, "max_num_jobs": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def a__ ( __lowercase , __lowercase ) -> Optional[Any]:
_A = _distribute_shards(**__lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, max_num_jobs, expected" , [
({"foo": 0}, 10, [{"foo": 0}]),
({"shards": [0, 1, 2, 3]}, 1, [{"shards": [0, 1, 2, 3]}]),
({"shards": [0, 1, 2, 3]}, 4, [{"shards": [0]}, {"shards": [1]}, {"shards": [2]}, {"shards": [3]}]),
({"shards": [0, 1]}, 4, [{"shards": [0]}, {"shards": [1]}]),
({"shards": [0, 1, 2, 3]}, 2, [{"shards": [0, 1]}, {"shards": [2, 3]}]),
] , )
def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]:
_A = _split_gen_kwargs(__lowercase , __lowercase )
assert out == expected
@pytest.mark.parametrize(
"gen_kwargs, expected" , [
({"foo": 0}, 1),
({"shards": [0]}, 1),
({"shards": [0, 1, 2, 3]}, 4),
({"shards": [0, 1, 2, 3], "foo": 0}, 4),
({"shards": [0, 1, 2, 3], "other": (0, 1)}, 4),
({"shards": [0, 1, 2, 3], "shards2": [0, 1]}, RuntimeError),
] , )
def a__ ( __lowercase , __lowercase ) -> List[Any]:
if expected is RuntimeError:
with pytest.raises(__lowercase ):
_number_of_shards_in_gen_kwargs(__lowercase )
else:
_A = _number_of_shards_in_gen_kwargs(__lowercase )
assert out == expected | 163 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCAmelCase_ = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
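The `_LazyModule` indirection above defers importing the heavy `modeling_roc_bert` submodule until one of its names is first accessed. A minimal sketch of the same idea (simplified assumption; not the actual `transformers._LazyModule` implementation):

import importlib
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ only runs once per attribute
        return value
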
"""simple docstring"""
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase_ = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase_ = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase_ = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase_ = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Value('''string''' ,id='''sequence''' ),
} ) ,codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] ,reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ) -> Dict:
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def UpperCAmelCase ( self : Dict ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Tuple=0.9 ,_snake_case : Optional[int]=3 ,_snake_case : Union[str, Any]=0.5 ) -> List[str]:
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
lowercase__ : int = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) ,word_tokenize(_snake_case ) ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
else:
lowercase__ : Tuple = [
meteor_score.single_meteor_score(_snake_case ,_snake_case ,alpha=_snake_case ,beta=_snake_case ,gamma=_snake_case )
for ref, pred in zip(_snake_case ,_snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 16 | 1 |
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : int = True
snake_case__ : int = True
if hasattr(__A , "use_cache" ):
snake_case__ : Optional[Any] = True
snake_case__ : Dict = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : List[str] = getattr(self.model_tester , "key_length" , __A )
for model_class in self.all_model_classes:
snake_case__ : Tuple = self._prepare_for_class(__A , __A )
snake_case__ : List[str] = model_class(__A )
snake_case__ : List[Any] = len(model(__A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A , saved_model=__A )
snake_case__ : str = os.path.join(__A , "saved_model" , "1" )
snake_case__ : str = tf.keras.models.load_model(__A )
snake_case__ : Optional[Any] = model(__A )
if self.is_encoder_decoder:
snake_case__ : Tuple = outputs["encoder_hidden_states"]
snake_case__ : str = outputs["encoder_attentions"]
else:
snake_case__ : Dict = outputs["hidden_states"]
snake_case__ : Tuple = outputs["attentions"]
self.assertEqual(len(__A ) , __A )
snake_case__ : int = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__A ) , __A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__A )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
snake_case__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : Any = getattr(self.model_tester , "key_length" , __A )
snake_case__ : List[Any] = getattr(self.model_tester , "key_length" , __A )
def check_decoder_attentions_output(__A : Optional[int] ):
snake_case__ : Optional[Any] = len(__A )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__A : Any ):
snake_case__ : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = True
snake_case__ : Any = False
snake_case__ : Dict = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = len(__A )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_decoder_attentions_output(__A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = model_class(__A )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
# Check attention is always last and order is fine
snake_case__ : Optional[int] = True
snake_case__ : List[Any] = True
snake_case__ : Any = model_class(__A )
snake_case__ : str = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) )
self.assertEqual(model.config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ):
snake_case__ : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
snake_case__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str = model(__A )[0]
snake_case__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __A )
snake_case__ : List[Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-4 )
| 286 | 1 |
def kth_permutation(k: int, n: int) -> list:
    """Find the k-th (0-indexed) lexicographic permutation of 0, 1, ..., n-1 via the factorial number system."""
    # Factorials from 1! to (n-1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
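For example, kth_permutation(10, 4) expands 10 in the factorial number system as 1·3! + 2·2! + 0·1!, picks indices 1, 2, 0 from the shrinking pool [0, 1, 2, 3], and returns [1, 3, 0, 2].
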
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the LZW-compressed bit string and return the decompressed bits."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # when the lexicon fills a power of two, widen every key by one leading zero
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes and write them to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length-prefix header (the leading zeros, the "1" marker, and the size field)."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
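Run from the shell with a source and a destination path (the script and file names here are illustrative): python lempel_ziv_decompress.py compressed.lz output.bin
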
from typing import Optional, Tuple

import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule


def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)


class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)
    def test_load_decoder_tokenizer_missing_tokens_raises_error(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]

        outputs = processor.decode(logits, output_word_offsets=True)
        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()

        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def snake_case__ ( self : Optional[Any] ):
import torch
        ds = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=True )
        ds = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) )
        ds_iter = iter(ds )
        sample = next(ds_iter )
        processor = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        model = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values
        with torch.no_grad():
            logits = model(input_values ).logits.cpu().numpy()
        output = processor.decode(logits[0] , output_word_offsets=True )
        # ratio of model stride to sampling rate converts frame offsets into seconds
        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_offsets = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
        EXPECTED_TEXT = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
        # output words
        self.assertEqual(""" """.join(self.get_from_offsets(word_offsets , """word""" ) ) , EXPECTED_TEXT )
        self.assertEqual(""" """.join(self.get_from_offsets(word_offsets , """word""" ) ) , output.text )
        # output times
        start_times = torch.tensor(self.get_from_offsets(word_offsets , """start_time""" ) )
        end_times = torch.tensor(self.get_from_offsets(word_offsets , """end_time""" ) )
        # fmt: off
        expected_start_times = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
        expected_end_times = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
        # fmt: on
        self.assertTrue(torch.allclose(start_times , expected_start_times , atol=0.01 ) )
        self.assertTrue(torch.allclose(end_times , expected_end_times , atol=0.01 ) )
| 20 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
"""simple docstring"""
    parser = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=a_ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=a_ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=a_ )
return parser.parse_args()
def main():
"""simple docstring"""
    args = parse_args()
# Import training_script as a module.
    script_fpath = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
# Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
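    # xmp.spawn launches num_cores worker processes; each one imports the training
    # script and calls its _mp_fn(index) entry point with the local TPU ordinal.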
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 15 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main( args ):
"""simple docstring"""
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/" )
    target_model_path = args.target_model_path
    print(F'''Load fine-pruned model from {model_name_or_path}''' )
    model = torch.load(os.path.join(model_name_or_path , "pytorch_model.bin" ) )
    pruned_model = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
            pruned_model[name] = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
print(F'''Pruned layer {name}''' )
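            # l0 regularization stores real-valued gate scores: stretch sigmoid(scores)
            # to the interval (l, r) and clamp to [0, 1] to recover the hard-concrete mask.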
elif pruning_method == "l0":
if "mask_scores" in name:
continue
                prefix_ = name[:-6]
                scores = model[F'''{prefix_}mask_scores''']
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F'''bertarized_{os.path.basename(model_name_or_path )}''' )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F'''\nCreated folder {target_model_path}''' )
    torch.save(pruned_model , os.path.join(target_model_path , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
        'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '
        'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '
        'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
SCREAMING_SNAKE_CASE :str = parser.parse_args()
main(args)
| 15 | 1 |
def solution():
    '''Project Euler 40: product of the digits d_1, d_10, d_100, ..., d_1000000 of
    Champernowne's constant 0.123456789101112...'''
    constant = []
    i = 1
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = """""".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[9_99] )
* int(constant[99_99] )
* int(constant[9_99_99] )
* int(constant[99_99_99] )
)
if __name__ == "__main__":
print(solution())
| 361 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[str] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
'''simple docstring'''
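    # here we list all keys to be renamed (original timm name on the left, our ViT name on the right)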
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path ):
    '''simple docstring'''
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 10_00
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        idalabel = {int(k ): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
            config.hidden_size = 1_92
            config.intermediate_size = 7_68
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("""small""" ):
            config.hidden_size = 3_84
            config.intermediate_size = 15_36
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("""small""" ):
            config.hidden_size = 7_68
            config.intermediate_size = 23_04
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("""base""" ):
            pass
        elif vit_name[4:].startswith("""large""" ):
            config.hidden_size = 10_24
            config.intermediate_size = 40_96
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("""huge""" ):
            config.hidden_size = 12_80
            config.intermediate_size = 51_20
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
# load original model from timm
    timm_model = timm.create_model(vit_name , pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__A : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 89 | 0 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution( limit = 1_50_00_00 ):
    """Project Euler 75: count perimeters up to ``limit`` that admit exactly one
    integer-sided right triangle, using Euclid's formula for primitive triples."""
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 70 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
    num_nodes , num_edges = 9, 14  # noqa: F841
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    adjancency = defaultdict(list )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
    result = mst(adjancency )
    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
        edge = tuple(answer[:2] )
        reverse = tuple(edge[::-1] )
        assert edge in result or reverse in result
| 49 | 0 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase( DiffusionPipeline ):
    def __init__( self , unet , scheduler) -> List[Any]:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self , batch_size = 1 , generator = None , num_inference_steps = 50 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=generator , )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image , t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output , t , image).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1)
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 366 |
"""simple docstring"""
from __future__ import annotations
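# NOTE: both helpers below are purely recursive, so Python's default recursion limit
# (about 1000 frames) bounds the length of the lists this module can sort.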
def rec_insertion_sort(collection, n) -> None:
    """Recursively sort ``collection`` in place, considering its first ``n`` items."""
    if len(collection ) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1 )
    rec_insertion_sort(collection, n - 1 )
def insert_next(collection, index) -> None:
    """Swap out-of-order neighbours until the pair at ``index`` is in ascending order."""
    if index >= len(collection ) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1 )
if __name__ == "__main__":
_a = input("""Enter integers separated by spaces: """)
_a = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 100 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
"""simple docstring"""
    def __init__(self ) -> Union[str, Any]:
        """simple docstring"""
        self.graph = {}
    def add_pair(self , u , v , w=1 ) -> str:
        """simple docstring"""
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes(self ) -> List[Any]:
        """simple docstring"""
        return list(self.graph )
    def remove_pair(self , u , v ) -> str:
        """simple docstring"""
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs(self , s=-2 , d=-1 ) -> Dict:
        """simple docstring"""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph(self , c=-1 ) -> int:
        """simple docstring"""
        if c == -1:
            c = floor(random() * 1_00_00 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs(self , s=-2 ) -> List[str]:
        """simple docstring"""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree(self , u ) -> Optional[int]:
        """simple docstring"""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree(self , u ) -> str:
        """simple docstring"""
        return len(self.graph[u] )
    def topological_sort(self , s=-2 ) -> Union[str, Any]:
        """simple docstring"""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return sorted_nodes
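    # cycle_nodes: iterative DFS that collects every node seen on a back edge,
    # i.e. the nodes that lie on at least one cycle of the directed graph.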
    def cycle_nodes(self ) -> Tuple:
        """simple docstring"""
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return list(anticipating_nodes )
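    # has_cycle mirrors cycle_nodes but bails out with True as soon as a back edge is found.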
    def has_cycle(self ) -> int:
        """simple docstring"""
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return False
    def dfs_time(self , s=-2 , e=-1 ) -> str:
        """simple docstring"""
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time(self , s=-2 ) -> List[str]:
        """simple docstring"""
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
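# Undirected variant: same traversal API as the directed graph above, but add_pair
# mirrors every edge in both adjacency lists and remove_pair deletes both directions.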
class lowerCAmelCase_ :
"""simple docstring"""
    def __init__(self ) -> List[Any]:
        """simple docstring"""
        self.graph = {}
    def add_pair(self , u , v , w=1 ) -> int:
        """simple docstring"""
        if self.graph.get(u ):
            # if there already is an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there already is an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair(self , u , v ) -> Optional[int]:
        """simple docstring"""
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs(self , s=-2 , d=-1 ) -> Optional[int]:
        """simple docstring"""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(node[1] )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph(self , c=-1 ) -> Tuple:
        """simple docstring"""
        if c == -1:
            c = floor(random() * 1_00_00 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 1_02 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i , n , 1 )
    def bfs(self , s=-2 ) -> Optional[Any]:
        """simple docstring"""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree(self , u ) -> str:
        """simple docstring"""
        return len(self.graph[u] )
    def cycle_nodes(self ) -> Union[str, Any]:
        """simple docstring"""
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return list(anticipating_nodes )
    def has_cycle(self ) -> List[Any]:
        """simple docstring"""
        stack = []
        visited = []
        s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
                else:
                    on_the_way_back = False
                indirect_parents.append(parent )
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return False
    def all_nodes(self ) -> Optional[int]:
        """simple docstring"""
        return list(self.graph )
    def dfs_time(self , s=-2 , e=-1 ) -> Any:
        """simple docstring"""
        begin = time()
        self.dfs(s , e )
        end = time()
        return end - begin
    def bfs_time(self , s=-2 ) -> Optional[int]:
        """simple docstring"""
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
| 25 |
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
UpperCAmelCase__ : List[str] = logging.get_logger(__name__)
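# Depth-estimation pipeline: preprocess -> _forward -> postprocess turns an image into
# a dict with the raw predicted depth tensor and a uint8 PIL visualization.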
@add_end_docstrings(PIPELINE_INIT_ARGS )
class lowerCAmelCase_ (Pipeline ):
"""simple docstring"""
    def __init__(self , *args , **kwargs ) -> Tuple:
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , """vision""" )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__(self , images , **kwargs ) -> List[str]:
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters(self , **kwargs ) -> Any:
"""simple docstring"""
return {}, {}, {}
    def preprocess(self , image ) -> Union[str, Any]:
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward(self , model_inputs ) -> int:
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess(self , model_outputs ) -> Optional[int]:
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        # upsample the depth map back to the original image resolution
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="""bicubic""" , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output )).astype("""uint8""" )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict["""predicted_depth"""] = predicted_depth
        output_dict["""depth"""] = depth
        return output_dict
| 25 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class lowerCAmelCase ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
'''simple docstring'''
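    # Prefix-tuned GPT-2 text decoder: CLIP features are mapped to "prefix" embeddings
    # that are prepended to the token embeddings before running the language model.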
_A : Tuple = [r'''h\.\d+\.attn\.bias''', r'''h\.\d+\.attn\.masked_bias''']
@register_to_config
    def __init__( self , prefix_length : int , prefix_inner_dim : int , prefix_hidden_dim : Optional[int] = None , vocab_size : int = 50257 , n_positions : int = 1024 , n_embd : int = 768 , n_layer : int = 12 , n_head : int = 12 , n_inner : Optional[int] = None , activation_function : str = "gelu_new" , resid_pdrop : float = 0.1 , embd_pdrop : float = 0.1 , attn_pdrop : float = 0.1 , layer_norm_epsilon : float = 1E-5 , initializer_range : float = 0.02 , scale_attn_weights : bool = True , use_cache : bool = True , scale_attn_by_inverse_layer_idx : bool = False , reorder_and_upcast_attn : bool = False , ) -> Any:
"""simple docstring"""
super().__init__()
        self.prefix_length = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                F" `n_embd`: {n_embd} are not equal." )
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPTaConfig(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPTaLMHeadModel(gpt_config )
    def forward( self , input_ids : torch.Tensor , prefix_embeds : torch.Tensor , attention_mask : Optional[torch.Tensor] = None , labels : Optional[torch.Tensor] = None , ) -> List[Any]:
        """simple docstring"""
        embedding_text = self.transformer.transformer.wte(input_ids )
        hidden = self.encode_prefix(prefix_embeds )
        prefix_embeds = self.decode_prefix(hidden )
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            labels = torch.cat((dummy_token, input_ids) , dim=1 )
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
    def get_dummy_token( self , batch_size : int , device : torch.device ) -> torch.Tensor:
        """simple docstring"""
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.intaa , device=device )
def lowerCAmelCase ( self : Any , __a : Union[str, Any] ) -> str:
"""simple docstring"""
return self.encode_prefix(__a )
@torch.no_grad()
    def generate_captions( self , features , eos_token_id , device ) -> Union[str, Any]:
        """simple docstring"""
        features = torch.split(features , 1 , dim=0 )
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device ) )  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        generated_tokens = torch.stack(generated_tokens )
        generated_seq_lengths = torch.stack(generated_seq_lengths )
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size : int = 5 , entry_length : int = 67 , temperature : float = 1.0 , eos_token_id : Optional[int] = None , ) -> Optional[Any]:
        """simple docstring"""
        # Beam search over GPT-2 logits: `tokens` holds the current beam sequences,
        # `scores` their cumulative log-probs, and `is_stopped` marks finished beams.
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int )
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool )
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids )
        for i in range(entry_length ):
            outputs = self.transformer(inputs_embeds=generated )
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1 ).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1 )
                generated = generated.expand(beam_size , *generated.shape[1:] )
                next_tokens , scores = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:] )
                    tokens = torch.cat((tokens, next_tokens) , dim=1 )
            else:
                logits[is_stopped] = -float(np.inf )
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1 ).topk(beam_size , -1 )
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1 )
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1 )
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            generated = torch.cat((generated, next_token_embed) , dim=1 )
            is_stopped = is_stopped + next_tokens.eq(stop_token_index ).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True )
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0 )
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : int ):
    """Return the prime factorization of a positive integer, in ascending order.

    >>> snake_case_(100)
    [2, 2, 5, 5]
    """
    n = lowerCAmelCase_
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 306 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _A ( _lowercase ) -> bool:
"""simple docstring"""
__UpperCamelCase = int(number**0.5 )
return number == sq * sq
def _A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> tuple[int, int]:
"""simple docstring"""
__UpperCamelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__UpperCamelCase = x_den * y_den * z_den
__UpperCamelCase = gcd(_lowercase , _lowercase )
top //= hcf
bottom //= hcf
return top, bottom
def _A ( _lowercase = 35 ) -> int:
"""simple docstring"""
__UpperCamelCase = set()
__UpperCamelCase = 42
__UpperCamelCase = Fraction(0 )
__UpperCamelCase = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
__UpperCamelCase = x_num * y_den + x_den * y_num
__UpperCamelCase = x_den * y_den
__UpperCamelCase = gcd(_lowercase , _lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__UpperCamelCase = add_three(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
unique_s.add(_lowercase )
# n=2
__UpperCamelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
__UpperCamelCase = x_den * x_den * y_den * y_den
if is_sq(_lowercase ) and is_sq(_lowercase ):
__UpperCamelCase = int(sqrt(_lowercase ) )
__UpperCamelCase = int(sqrt(_lowercase ) )
__UpperCamelCase = gcd(_lowercase , _lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__UpperCamelCase = add_three(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
unique_s.add(_lowercase )
# n=-1
__UpperCamelCase = x_num * y_num
__UpperCamelCase = x_den * y_num + x_num * y_den
__UpperCamelCase = gcd(_lowercase , _lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__UpperCamelCase = add_three(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
unique_s.add(_lowercase )
# n=2
__UpperCamelCase = x_num * x_num * y_num * y_num
__UpperCamelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_lowercase ) and is_sq(_lowercase ):
__UpperCamelCase = int(sqrt(_lowercase ) )
__UpperCamelCase = int(sqrt(_lowercase ) )
__UpperCamelCase = gcd(_lowercase , _lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
__UpperCamelCase = add_three(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
unique_s.add(_lowercase )
for num, den in unique_s:
total += Fraction(_lowercase , _lowercase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 310 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module( _lowercase ) -> Dict:
"""simple docstring"""
if is_torch_version('<' , '2.0.0' ) or not hasattr(_lowercase , '_dynamo' ):
return False
return isinstance(_lowercase , torch._dynamo.eval_frame.OptimizedModule )
def extract_model_from_parallel( model , keep_fpaa_wrapper = True ):
    """simple docstring"""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model )
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options ):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model , 'forward' )
        original_forward = model.__dict__.pop('_original_forward' , None )
        if original_forward is not None:
            while hasattr(forward , '__wrapped__' ):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model , '_converted_to_transformer_engine' , False ):
            convert_model(model , to_transformer_engine=False )
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> Any:
    """simple docstring"""
    PartialState().wait_for_everyone()
def save( obj , f ) -> Any:
    """simple docstring"""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f )
    elif PartialState().local_process_index == 0:
        torch.save(obj , f )
@contextmanager
def patch_environment( **kwargs ) -> Union[str, Any]:
    """simple docstring"""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name( obj ) -> Tuple:
    """simple docstring"""
    if not hasattr(obj , '__qualname__' ) and not hasattr(obj , '__name__' ):
        obj = getattr(obj , '__class__' , obj )
    if hasattr(obj , '__qualname__' ):
        return obj.__qualname__
    if hasattr(obj , '__name__' ):
        return obj.__name__
    return str(obj )
def merge_dicts( source , destination ) -> Any:
    """simple docstring"""
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
def is_port_in_use( port = None ) -> bool:
    """simple docstring"""
    if port is None:
        port = 2_95_00
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(('localhost', port) ) == 0
| 310 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def A__ ( self: Union[str, Any] ) -> Tuple:
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
        processor = BlipaProcessor(image_processor ,tokenizer )
        processor.save_pretrained(self.tmpdirname )
def A__ ( self: Optional[Any] ,**lowerCamelCase_: List[Any] ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ).tokenizer
def A__ ( self: str ,**lowerCamelCase_: List[str] ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase_ ).image_processor
def A__ ( self: Union[str, Any] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
    def A__ ( self: List[str] ) -> List[str]:
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
    def A__ ( self: str ) -> Optional[int]:
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False ,padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=False ,padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer ,PreTrainedTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor ,BlipImageProcessor )
    def A__ ( self: List[Any] ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input ,return_tensors="""np""" )
        input_processor = processor(images=image_input ,return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
    def A__ ( self: List[str] ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str ,return_token_type_ids=False )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
    def A__ ( self: Union[str, Any] ) -> Union[str, Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def A__ ( self: int ) -> str:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok ,decoded_processor )
    def A__ ( self: Tuple ) -> Optional[Any]:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer ,image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str ,images=image_input )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) ,["""pixel_values""", """input_ids""", """attention_mask"""] )
| 59 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''
    Naive recursion: counts every ordered combination, exponential time.

    >>> combination_sum_iv(3, [1, 2, 5], 5)
    9
    '''
    def count_of_possible_combinations(target: int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''
    Top-down recursion with memoization over ``dp_array``.

    >>> combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    9
    '''
    def count_of_possible_combinations_with_dp_array(
        target: int , dp_array: list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''
    Bottom-up tabulation in O(n * target) time.

    >>> combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    9
    '''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase_ = 3
UpperCamelCase_ = 5
UpperCamelCase_ = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 59 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a ( ProcessorMixin ):
"""simple docstring"""
a : List[str] = 'MCTCTFeatureExtractor'
a : str = 'AutoTokenizer'
    def __init__( self , feature_extractor , tokenizer ) -> Any:
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ) -> Union[str, Any]:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
            audio = kwargs.pop("""raw_speech""" )
        else:
            audio = kwargs.pop("""audio""" , None )
        sampling_rate = kwargs.pop("""sampling_rate""" , None )
        text = kwargs.pop("""text""" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
def UpperCAmelCase ( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : int ) -> List[Any]:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def UpperCAmelCase ( self : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : List[str] ) -> int:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*__lowercase , **__lowercase )
__UpperCAmelCase : Optional[int] = kwargs.pop("""input_features""" , __lowercase )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""labels""" , __lowercase )
if len(__lowercase ) > 0:
__UpperCAmelCase : Union[str, Any] = args[0]
__UpperCAmelCase : str = args[1:]
if input_features is not None:
__UpperCAmelCase : Any = self.feature_extractor.pad(__lowercase , *__lowercase , **__lowercase )
if labels is not None:
__UpperCAmelCase : Union[str, Any] = self.tokenizer.pad(__lowercase , **__lowercase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
__UpperCAmelCase : Any = labels["""input_ids"""]
return input_features
def UpperCAmelCase ( self : Any , *__lowercase : Union[str, Any] , **__lowercase : Dict ) -> List[Any]:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@contextmanager
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your audio inputs, or in a separate call.""" )
__UpperCAmelCase : Any = True
__UpperCAmelCase : Optional[int] = self.tokenizer
yield
__UpperCAmelCase : List[Any] = self.feature_extractor
__UpperCAmelCase : int = False
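
# Usage sketch (checkpoint id and variable names are illustrative assumptions, not
# from this file): `audio` is routed to the feature extractor while `text` is
# tokenized and attached as `labels`, replacing the deprecated
# `as_target_processor` context manager.
#
#     processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#     batch = processor(audio=raw_speech, sampling_rate=16000, text=transcript)
#     batch["input_features"]  # from the feature extractor
#     batch["labels"]          # token ids from the tokenizer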
| 114 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
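
# Small local sketch (constructed in memory, no checkpoint download): the constructor
# above parses `feed_forward_proj` into `dense_act_fn`/`is_gated_act`, remapping the
# legacy "gated-gelu" spelling to "gelu_new".
if __name__ == "__main__":
    _cfg = T5Config(feed_forward_proj="gated-gelu")
    assert _cfg.is_gated_act and _cfg.dense_act_fn == "gelu_new"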
| 114 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :int ):
snake_case__ : Dict = tempfile.mkdtemp()
snake_case__ : List[str] = SamImageProcessor()
snake_case__ : str = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :Tuple ,**__lowercase :List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowercase ).image_processor
def __lowerCamelCase ( self :Any ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self :List[str] ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : List[str] = self.get_image_processor(do_normalize=__lowercase ,padding_value=1.0 )
snake_case__ : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=__lowercase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowercase )
def __lowerCamelCase ( self :Dict ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Tuple = SamProcessor(image_processor=__lowercase )
snake_case__ : List[str] = self.prepare_image_inputs()
snake_case__ : List[Any] = image_processor(__lowercase ,return_tensors='''np''' )
snake_case__ : int = processor(images=__lowercase ,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
@require_torch
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Optional[int] = SamProcessor(image_processor=__lowercase )
snake_case__ : str = [torch.ones((1, 3, 5, 5) )]
snake_case__ : Union[str, Any] = [[1_7_6_4, 2_6_4_6]]
snake_case__ : Tuple = [[6_8_3, 1_0_2_4]]
snake_case__ : Optional[Any] = processor.post_process_masks(__lowercase ,__lowercase ,__lowercase )
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
snake_case__ : int = processor.post_process_masks(
__lowercase ,torch.tensor(__lowercase ) ,torch.tensor(__lowercase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
snake_case__ : List[str] = [np.ones((1, 3, 5, 5) )]
snake_case__ : int = processor.post_process_masks(__lowercase ,np.array(__lowercase ) ,np.array(__lowercase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
snake_case__ : int = [[1, 0], [0, 1]]
with self.assertRaises(__lowercase ):
snake_case__ : List[str] = processor.post_process_masks(__lowercase ,np.array(__lowercase ) ,np.array(__lowercase ) )
@require_vision
@require_tf
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : int = tempfile.mkdtemp()
snake_case__ : List[str] = SamImageProcessor()
snake_case__ : Dict = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :Union[str, Any] ,**__lowercase :List[str] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowercase ).image_processor
def __lowerCamelCase ( self :Optional[int] ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self :Optional[Any] ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Any = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[int] = self.get_image_processor(do_normalize=__lowercase ,padding_value=1.0 )
snake_case__ : Tuple = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=__lowercase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,__lowercase )
def __lowerCamelCase ( self :int ):
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : Tuple = SamProcessor(image_processor=__lowercase )
snake_case__ : int = self.prepare_image_inputs()
snake_case__ : Optional[Any] = image_processor(__lowercase ,return_tensors='''np''' )
snake_case__ : Optional[Any] = processor(images=__lowercase ,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
@require_tf
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Optional[Any] = SamProcessor(image_processor=__lowercase )
snake_case__ : Optional[int] = [tf.ones((1, 3, 5, 5) )]
snake_case__ : List[Any] = [[1_7_6_4, 2_6_4_6]]
snake_case__ : List[Any] = [[6_8_3, 1_0_2_4]]
snake_case__ : int = processor.post_process_masks(__lowercase ,__lowercase ,__lowercase ,return_tensors='''tf''' )
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
snake_case__ : int = processor.post_process_masks(
__lowercase ,tf.convert_to_tensor(__lowercase ) ,tf.convert_to_tensor(__lowercase ) ,return_tensors='''tf''' ,)
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
# should also work with np
snake_case__ : Tuple = [np.ones((1, 3, 5, 5) )]
snake_case__ : Optional[int] = processor.post_process_masks(
__lowercase ,np.array(__lowercase ) ,np.array(__lowercase ) ,return_tensors='''tf''' )
self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6) )
snake_case__ : int = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
snake_case__ : Optional[Any] = processor.post_process_masks(
__lowercase ,np.array(__lowercase ) ,np.array(__lowercase ) ,return_tensors='''tf''' )
@require_vision
@require_torchvision
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : Tuple = tempfile.mkdtemp()
snake_case__ : str = SamImageProcessor()
snake_case__ : List[str] = SamProcessor(__lowercase )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self :Any ,**__lowercase :List[Any] ):
return AutoProcessor.from_pretrained(self.tmpdirname ,**__lowercase ).image_processor
def __lowerCamelCase ( self :Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self :Any ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def __lowerCamelCase ( self :str ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : str = SamProcessor(image_processor=__lowercase )
snake_case__ : Union[str, Any] = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
snake_case__ : Any = [tf.convert_to_tensor(__lowercase )]
snake_case__ : Tuple = [torch.tensor(__lowercase )]
snake_case__ : List[Any] = [[1_7_6_4, 2_6_4_6]]
snake_case__ : str = [[6_8_3, 1_0_2_4]]
snake_case__ : List[Any] = processor.post_process_masks(
__lowercase ,__lowercase ,__lowercase ,return_tensors='''tf''' )
snake_case__ : Optional[int] = processor.post_process_masks(
__lowercase ,__lowercase ,__lowercase ,return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Optional[Any] = SamProcessor(image_processor=__lowercase )
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = image_processor(__lowercase ,return_tensors='''pt''' )['''pixel_values'''].numpy()
snake_case__ : Any = processor(images=__lowercase ,return_tensors='''pt''' )['''pixel_values'''].numpy()
snake_case__ : Dict = image_processor(__lowercase ,return_tensors='''tf''' )['''pixel_values'''].numpy()
snake_case__ : str = processor(images=__lowercase ,return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(__lowercase ,__lowercase ) )
self.assertTrue(np.allclose(__lowercase ,__lowercase ) )
self.assertTrue(np.allclose(__lowercase ,__lowercase ) )
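
# Shape sketch (standalone helper, not part of the test suite): per the assertions
# above, `post_process_masks` resizes every low-resolution mask back to the original
# image size, so only the batch/channel dims survive from the input mask shape.
def _expected_post_process_shape(mask_shape, original_size):
    return (mask_shape[0], mask_shape[1], original_size[0], original_size[1])


assert _expected_post_process_shape((1, 3, 5, 5), (1764, 2646)) == (1, 3, 1764, 2646)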
| 44 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
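
# Standalone relationship sketch (mirrors the docstring example above): with
# `squared=False` the same call returns the RMSE, i.e. the square root of the MSE.
if __name__ == "__main__":
    from math import isclose, sqrt

    _preds, _refs = [2.5, 0.0, 2, 8], [3, -0.5, 2, 7]
    _mse = mean_squared_error(_refs, _preds)  # 0.375
    _rmse = mean_squared_error(_refs, _preds, squared=False)  # 0.6123...
    assert isclose(_rmse, sqrt(_mse))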
| 44 | 1 |
"""Largest prime factor (Project Euler problem 3; default n = 600851475143)."""


def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n via trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
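
# Worked example (well-known check, not from the original file): 13195 = 5 * 7 * 13 * 29,
# so its largest prime factor is 29; the default argument reproduces Project Euler #3.
if __name__ == "__main__":
    assert solution(13195) == 29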
| 346 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: the generated video frames."""

    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
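
# Usage sketch (checkpoint id is an assumption taken from the public hub, not from
# this file): the synthesis pipeline maps a prompt to the `frames` field of the
# output dataclass above.
#
#     import torch
#     from diffusers import TextToVideoSDPipeline
#
#     pipe = TextToVideoSDPipeline.from_pretrained(
#         "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16
#     )
#     frames = pipe("an astronaut riding a horse").frames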
| 346 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
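
# Arithmetic sketch (uses only the defaults above, no download): with num_layers=12
# and num_sparse_encoder_layers=3, every 4th block is a sparse MoE block.
if __name__ == "__main__":
    _cfg = SwitchTransformersConfig()
    assert _cfg.encoder_sparse_step == 4 and _cfg.decoder_sparse_step == 4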
| 7 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
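
# Directory-layout sketch (paths follow save_pretrained above; the concrete tokenizer
# types are illustrative assumptions):
#
#     rag_tokenizer/
#         question_encoder_tokenizer/   <- e.g. a DPR question-encoder tokenizer
#         generator_tokenizer/          <- e.g. a BART tokenizer
#
# from_pretrained() reassembles the pair from exactly these two subfolders.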
| 7 | 1 |
from collections import namedtuple
UpperCAmelCase_ = namedtuple('from_to', 'from_ to')
UpperCAmelCase_ = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert `value` between the volume units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
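
# Usage sketch (values chosen for illustration): every unit is normalised through
# cubic metres, so a conversion is just two multiplications.
if __name__ == "__main__":
    assert volume_conversion(4, "cubicmeter", "litre") == 4000
    assert volume_conversion(1, "litre", "kilolitre") == 0.001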
| 12 |
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Sample tree used for the checks below.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
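
# Quick property sketch (standalone, values assumed): one dangling child breaks
# fullness, while zero or two children per node preserve it.
def _demo_full_tree_property() -> None:
    root = Node(1)
    assert is_full_binary_tree(root) and depth_of_tree(root) == 1
    root.left = Node(2)
    assert not is_full_binary_tree(root)
    root.right = Node(3)
    assert is_full_binary_tree(root) and depth_of_tree(root) == 2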
| 281 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : str ) -> List[Any]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
__SCREAMING_SNAKE_CASE = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sgugger/tiny-distilbert-classification"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , only_pretrain_model=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , torchscript=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
# set architectures equal to `None`
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__SCREAMING_SNAKE_CASE , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart"""
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart"""
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] )
__SCREAMING_SNAKE_CASE = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , save_to_csv=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """train_time.csv""" ) , env_info_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """env.csv""" ) , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
benchmark.run()
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """env.csv""" ) ).exists() )
def UpperCAmelCase__ ( self : List[str] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(__SCREAMING_SNAKE_CASE : Optional[Any] ):
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """sequential""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """cumulative""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """current""" ) )
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__SCREAMING_SNAKE_CASE , """log.txt""" ) , log_print=__SCREAMING_SNAKE_CASE , trace_memory_line_by_line=__SCREAMING_SNAKE_CASE , multi_process=__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """log.txt""" ) ).exists() )
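
# Minimal standalone sketch (same tiny model id the tests use; running this downloads
# the checkpoint): the three-step pattern behind every test above.
#
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     results.time_inference_result  # nested {model: {"bs": ..., "ss": ..., "result": ...}}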
| 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase : Any = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
UpperCAmelCase : Optional[Any] = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = config
__SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = tokenizer
__SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , )
else:
__SCREAMING_SNAKE_CASE = model
def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
return scheduler
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model
__SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
__SCREAMING_SNAKE_CASE = [
{
"""params""": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
"""weight_decay""": self.hparams.weight_decay,
},
{
"""params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE = Adafactor(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = AdamW(
__SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE = optimizer
__SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
return [optimizer], [scheduler]
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
"""simple docstring"""
return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.validation_end(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
"""simple docstring"""
if stage == "test":
__SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
"""simple docstring"""
raise NotImplementedError("""You must implement this for your task""" )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return self.train_loader
def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Any:
"""simple docstring"""
return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , """cached_{}_{}_{}""".format(
__SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
__SCREAMING_SNAKE_CASE = self.step_count
self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
@staticmethod
def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
parser.add_argument(
"""--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
parser.add_argument(
"""--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument(
"""--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
parser.add_argument(
"""--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , )
parser.add_argument(
"""--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). Goes into model.config""" , )
parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
parser.add_argument(
"""--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
parser.add_argument("""--adafactor""" , action="""store_true""" )
class InitCallback(pl.Callback):
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback(pl.Callback):
def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__SCREAMING_SNAKE_CASE )
class LoggingCallback(pl.Callback):
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
__SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
"""simple docstring"""
rank_zero_info("""***** Validation results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log results
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
"""simple docstring"""
rank_zero_info("""***** Test results *****""" )
__SCREAMING_SNAKE_CASE = trainer.callback_metrics
# Log and save results to file
__SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
for key in sorted(__SCREAMING_SNAKE_CASE ):
if key not in ["log", "progress_bar"]:
rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
def a__ ( a__ , a__ ):
"""simple docstring"""
parser.add_argument(
"""--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=a__ , default="""O2""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
parser.add_argument(
"""--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
parser.add_argument(
"""--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
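# Illustrative wiring of the helpers above (the module class and the exact flags are
# hypothetical, not part of the original file):
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     args = parser.parse_args(["--do_train", "--seed", "7"])
#     trainer = generic_train(MyTransformerModule(args), args)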
| 331 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
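# Effect of the lazy init above (sketch): the torch-backed submodules are only imported
# when a symbol is first touched, so e.g.
#
#     from transformers import AltCLIPProcessor   # resolves through the _LazyModule
#
# stays cheap until AltCLIPModel or another heavy attribute is actually accessed.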
| 339 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 20 | 0 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the result of every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
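# Worked example: "KHOOR" is "HELLO" shifted by 3, so among the 26 candidates the
# brute-force loop prints "Decryption using Key #3: HELLO".
#
#     decrypt("KHOOR")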
| 113 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
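# Quick sanity check (illustrative): constructor kwargs land as attributes on the config.
if __name__ == "__main__":
    cfg = ViTMSNConfig(image_size=384)
    assert cfg.image_size == 384 and cfg.hidden_size == 768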
| 113 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    # Ernie-M models do not use token_type embeddings.
    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__(self, sentencepiece_model_ckpt, vocab_file=None, do_lower_case=False, encoding="utf8", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping(self, text):
        """Map each sentencepiece token back to its (start, end) character span in ``text``."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    def clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))
    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize a string with sentencepiece, then re-split pieces on CJK chars, punctuation and digit boundaries."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False
    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
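# Usage sketch (checkpoint name taken from the map above; requires the sentencepiece
# package and network access):
#
#     tok = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
#     tok("Hello world")["input_ids"]   # [CLS] ... [SEP]; exact ids depend on the vocab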
| 329 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]
    def __init__(self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute the log-mel spectrogram of the provided audio, clipped to a floor 8 dB below the peak."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None, return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None, do_normalize=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
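# Quick shape check: one second of silence is padded to the fixed 30 s window and
# featurized into 80 mel bins x 3000 frames.
if __name__ == "__main__":
    fe = WhisperFeatureExtractor()
    feats = fe([np.zeros(16_000)], sampling_rate=16_000, return_tensors="np")
    assert feats["input_features"].shape == (1, 80, 3000)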
| 205 | 0 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 325 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1, ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        spec_out = self.decoder_norm(inputs)
        spec_out = self.post_dropout(spec_out)
        spec_out = self.spec_out(spec_out)
        return spec_out
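# Shape sketch (illustrative, not from the original file) for the decoder above: a list of
# (encoding, token-mask) pairs plus noisy target tokens in, a (batch, targets_length,
# input_dims) spectrogram out.
#
#     decoder = T5FilmDecoder()
#     tokens = torch.randn(1, 256, 128)
#     encodings = [(torch.randn(1, 50, 768), torch.ones(1, 50))]
#     decoder(encodings, tokens, torch.tensor([0.5])).shape   # (1, 256, 128)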
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: applies a feature-wise affine transform conditioned on an embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 325 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively branch on excluding, then including, sequence[index]."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
lowercase_ : list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
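# For seq = [1, 2] the exclude-then-include branching prints [], [2], [1], [1, 2]:
# all 2**n subsequences of an n-element sequence.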
| 133 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(self, vocab_size=50_265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
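# Quick check (illustrative): the export spec exposes dynamic batch/sequence axes.
#
#     onnx_config = RobertaOnnxConfig(RobertaConfig())
#     dict(onnx_config.inputs)
#     # {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}}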
| 133 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used to build a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name)
IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert", help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.", )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder.")
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output.")
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name", type=str, default=None, help="Optional fine-tuning task name if the TF model was a finetuned model.", )
        train_parser.set_defaults(func=convert_command_factory)
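    # The registered subcommand is invoked through the CLI entry point, e.g.
    # (paths illustrative):
    #
    #     transformers-cli convert --model_type bert \
    #         --tf_checkpoint ./bert_model.ckpt --config ./bert_config.json \
    #         --pytorch_dump_output ./pytorch_model.bin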
    def __init__(self, model_type: str, tf_checkpoint: str, pytorch_dump_output: str, config: str, finetuning_task_name: str, *args, ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f'Loading model {model_type}')
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE)
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name)
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]")
| 350 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 196 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 | 0 |
def reverse_long_words(sentence: str) -> str:
    """Reverse each word that is longer than four characters."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
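# Only words longer than four characters are flipped, so:
# reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"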
| 105 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset

BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ], )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, src_lang=src_lang, tgt_lang=tgt_lang, )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def UpperCamelCase__ ( self , tok_name) -> int:
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeqaSeqDataset(
            tokenizer , data_dir=tmp_dir , type_path='''train''' , max_source_length=20 , max_target_length=trunc_target , )
        dataloader = DataLoader(train_dataset , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCamelCase__ ( self) -> Dict:
        tokenizer = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath('''train.source''').open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer , tmp_dir , 128 , save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath('''train.source''').open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''')
def UpperCamelCase__ ( self) -> List[Any]:
if not FAIRSEQ_AVAILABLE:
return
        ds , max_tokens , tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens , required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds , batch_sampler=batch_sampler , collate_fn=ds.collate_fn , num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch['''input_ids'''].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch['''input_ids'''].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"""too many tokens in {len(failures)} batches""")
def UpperCamelCase__ ( self) -> Tuple:
        ds , max_tokens , tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs , shuffle=False)
        naive_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2)
        sortish_dl = DataLoader(ds , batch_size=bs , collate_fn=ds.collate_fn , num_workers=2 , sampler=sortish_sampler)
        pad = tokenizer.pad_token_id
        def count_pad_tokens(data_loader , k='''input_ids'''):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]
        assert sum(count_pad_tokens(sortish_dl , k='''labels''')) < sum(count_pad_tokens(naive_dl , k='''labels'''))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset( self , n_obs=1_000 , max_len=128) -> List[Any]:
        if os.getenv('''USE_REAL_DATA''' , False):
            data_dir = '''examples/seq2seq/wmt_en_ro'''
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath('''train.len''').exists():
                save_len_file(MARIAN_TINY , data_dir)
        else:
            data_dir = '''examples/seq2seq/test_data/wmt_en_ro'''
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY , data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer , data_dir=data_dir , type_path='''train''' , max_source_length=max_len , max_target_length=max_len , n_obs=n_obs , )
        return ds, max_tokens, tokenizer
def UpperCamelCase__ ( self) -> Tuple:
        ds , max_tokens , tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=0 , add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds , 256 , num_replicas=2 , rank=1 , add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def UpperCamelCase__ ( self , tok_name) -> List[Any]:
        tokenizer = AutoTokenizer.from_pretrained(tok_name , use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
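# Hedged usage note (added): with the transformers examples requirements
# installed, these cases run under pytest, e.g.
#   pytest old_test_datasets.py -k "sortish"
# (the test file name is an assumption about the examples layout).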
| 105 | 1 |
from __future__ import annotations
class UpperCAmelCase :
'''simple docstring'''
    def __init__( self , order : int ):
        """simple docstring"""
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs : list[float] , b_coeffs : list[float] ):
        """simple docstring"""
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}""" )
        if len(b_coeffs ) != self.order + 1:
            raise ValueError(
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}""" )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample : float ):
        """simple docstring"""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        # shift the histories one step; the newest sample and output land at index 0
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
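# Hedged usage sketch (added): a first-order low-pass configuration; the
# coefficient values below are illustrative assumptions, not from the source.
if __name__ == "__main__":
    filt = UpperCAmelCase(1 )
    filt.set_coefficients([1.0, -0.9] , [0.05, 0.05] )
    print([round(filt.process(1.0 ) , 4 ) for _ in range(3 )] )  # [0.05, 0.145, 0.2305]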
| 121 |
from __future__ import annotations
class Node :
    '''simple docstring'''
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display( tree ) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )
def depth_of_tree( tree ) -> int:
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0
def is_full_binary_tree( tree ) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    tree.left.right.left = Node(6 )
    tree.right.left = Node(7 )
    tree.right.left.left = Node(8 )
    tree.right.left.left.right = Node(9 )
    print(is_full_binary_tree(tree ) )
    print(depth_of_tree(tree ) )
    print('''Tree is: ''' )
    display(tree )
if __name__ == "__main__":
main()
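    # Hedged extra check (added): a three-node tree is full, since every node
    # has either zero or two children.
    t = Node(1 )
    t.left , t.right = Node(2 ), Node(3 )
    print(is_full_binary_tree(t ) , depth_of_tree(t ) )  # True 2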
| 121 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCAmelCase = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase = {
'''allenai/led-base-16384''': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Union[str, Any]:
    bs = (
        list(range(ord('''!''') , ord('''~''') + 1)) + list(range(ord('''¡''') , ord('''¬''') + 1)) + list(range(ord('''®''') , ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs(word) -> Dict:
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
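# Hedged check (added): adjacent symbol pairs are the candidate merges that
# the BPE loop in the tokenizer below ranks and applies.
print(sorted(get_pairs(tuple('''lower''' ) ) ) )  # [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]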
class __magic_name__ ( __UpperCAmelCase ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : int , snake_case__ : Dict , snake_case__ : Union[str, Any] , snake_case__ : Any="replace" , snake_case__ : Any="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Optional[Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : List[str]="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Tuple="<mask>" , snake_case__ : Optional[Any]=False , **snake_case__ : Optional[Any] , ):
'''simple docstring'''
lowercase :Optional[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else bos_token
lowercase :Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else eos_token
lowercase :Union[str, Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else sep_token
lowercase :Dict = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else cls_token
lowercase :List[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else unk_token
lowercase :Tuple = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowercase :List[Any] = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else mask_token
super().__init__(
errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , **snake_case__ , )
with open(snake_case__ , encoding='''utf-8''' ) as vocab_handle:
lowercase :Union[str, Any] = json.load(snake_case__ )
lowercase :Any = {v: k for k, v in self.encoder.items()}
lowercase :str = errors # how to handle errors in decoding
lowercase :Dict = bytes_to_unicode()
lowercase :Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case__ , encoding='''utf-8''' ) as merges_handle:
lowercase :Dict = merges_handle.read().split('''\n''' )[1:-1]
lowercase :Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase :Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowercase :Any = {}
lowercase :Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase :Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __snake_case ( self : Tuple ):
'''simple docstring'''
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : List[Any] , snake_case__ : Optional[Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase :List[Any] = tuple(snake_case__ )
lowercase :List[str] = get_pairs(snake_case__ )
if not pairs:
return token
while True:
lowercase :int = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase :Union[str, Any] = bigram
lowercase :Union[str, Any] = []
lowercase :List[str] = 0
while i < len(snake_case__ ):
try:
lowercase :str = word.index(snake_case__ , snake_case__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase :List[str] = j
if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase :Optional[int] = tuple(snake_case__ )
lowercase :Tuple = new_word
if len(snake_case__ ) == 1:
break
else:
lowercase :Tuple = get_pairs(snake_case__ )
lowercase :Optional[int] = ''' '''.join(snake_case__ )
lowercase :int = word
return word
def __snake_case ( self : Any , snake_case__ : int ):
'''simple docstring'''
lowercase :str = []
for token in re.findall(self.pat , snake_case__ ):
lowercase :Union[str, Any] = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case__ ).split(''' ''' ) )
return bpe_tokens
def __snake_case ( self : Optional[int] , snake_case__ : List[Any] ):
'''simple docstring'''
return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) )
def __snake_case ( self : str , snake_case__ : str ):
'''simple docstring'''
return self.decoder.get(snake_case__ )
def __snake_case ( self : str , snake_case__ : Tuple ):
'''simple docstring'''
lowercase :Any = ''''''.join(snake_case__ )
lowercase :Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def __snake_case ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase :Tuple = os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase :str = os.path.join(
snake_case__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case__ , ensure_ascii=snake_case__ ) + '''\n''' )
lowercase :Tuple = 0
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda snake_case__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
''' Please check that the tokenizer is not corrupted!''' )
lowercase :List[str] = token_index
writer.write(''' '''.join(snake_case__ ) + '''\n''' )
index += 1
return vocab_file, merge_file
def __snake_case ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase :Optional[Any] = [self.cls_token_id]
lowercase :Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self : List[Any] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
def __snake_case ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase :List[Any] = [self.sep_token_id]
lowercase :Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __snake_case ( self : int , snake_case__ : Any , snake_case__ : List[str]=False , **snake_case__ : int ):
'''simple docstring'''
lowercase :Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case__ ) > 0 and not text[0].isspace()):
lowercase :Any = ''' ''' + text
return (text, kwargs)
def __snake_case ( self : Dict , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
'''simple docstring'''
lowercase :int = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
lowercase :str = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase :Tuple = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
lowercase :Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(snake_case__ )
if needs_to_be_padded:
lowercase :List[str] = len(snake_case__ ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase :Union[str, Any] = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase :List[str] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 361 |
"""simple docstring"""
def gcd(a : int , b : int) -> int:
    while a != 0:
        a , b = b % a, a
    return b
def mod_inverse(a : int , m : int) -> int:
    if gcd(a , m) != 1:
        msg = F"""mod inverse of {a!r} and {m!r} does not exist"""
        raise ValueError(msg)
    u1 , u2 , u3 = 1, 0, a
    v1 , v2 , v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1 , v2 , v3 , u1 , u2 , u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
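# Hedged check (added): 7 * 23 = 161 = 4 * 40 + 1, so 23 is the inverse of 7
# modulo 40.
if __name__ == "__main__":
    print(mod_inverse(7 , 40 ) )  # 23
    print((7 * mod_inverse(7 , 40 )) % 40 )  # 1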
| 172 | 0 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def recursive_print(name , val , spaces=0):
    """simple docstring"""
    if name is None:
        msg = None
    else:
        fmt = '''.''' * max(0 , spaces - 2) + '''# {:''' + str(50 - spaces) + '''s}'''
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val , dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k , val[k] , spaces + 2)
    elif isinstance(val , torch.Tensor):
        print(msg , ''':''' , val.size())
    else:
        print(msg , ''':''' , val)
def fix_query_key_value_ordering(param , checkpoint_version , num_splits , num_heads , hidden_size):
    """simple docstring"""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0 , 2)
        param = param.transpose(1 , 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0 , 1).contiguous()
    param = param.view(*input_shape)
    return param
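# Hedged demo (added): a tiny integer tensor makes the checkpoint-version-2
# permutation visible -- the three fused q/k/v splits become contiguous.
# The sizes below are illustrative, not from a real checkpoint.
if __name__ == "__main__":
    _w = torch.arange(12 ).view(12 , 1 )  # num_heads=2, num_splits=3, head_dim=2
    print(fix_query_key_value_ordering(_w , 2.0 , 3 , 2 , 2 ).flatten().tolist() )
    # [0, 1, 6, 7, 2, 3, 8, 9, 4, 5, 10, 11]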
def convert_megatron_checkpoint(args , input_state_dict , config):
"""simple docstring"""
_a = {}
# old versions did not store training args
_a = input_state_dict.get('''args''' , _A)
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_a = ds_args.padded_vocab_size
_a = ds_args.max_position_embeddings
_a = ds_args.hidden_size
_a = ds_args.num_layers
_a = ds_args.num_attention_heads
_a = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_a = config.n_head
# The hidden_size per head.
_a = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_a = input_state_dict['''checkpoint_version''']
else:
_a = 0.0
# The model.
_a = input_state_dict['''model''']
# The language model.
_a = model['''language_model''']
# The embeddings.
_a = lm['''embedding''']
# The word embeddings.
_a = embeddings['''word_embeddings''']['''weight''']
# Truncate the embedding table to vocab_size rows.
_a = word_embeddings[: config.vocab_size, :]
_a = word_embeddings
# The position embeddings.
_a = embeddings['''position_embeddings''']['''weight''']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_a = pos_embeddings.size(0)
if n_positions != config.n_positions:
raise ValueError(
F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''')
# Store the position embeddings.
_a = pos_embeddings
# The transformer.
_a = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder''']
# The regex to extract layer names.
_a = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''')
# The simple map of names for "automated" rules.
_a = {
'''attention.dense''': '''.attn.c_proj.''',
'''self_attention.dense''': '''.attn.c_proj.''',
'''mlp.dense_h_to_4h''': '''.mlp.c_fc.''',
'''mlp.dense_4h_to_h''': '''.mlp.c_proj.''',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
_a = layer_re.match(_A)
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
_a = int(m.group(1))
# The name of the operation.
_a = m.group(2)
# Is it a weight or a bias?
_a = m.group(3)
# The name of the layer.
_a = F'''transformer.h.{layer_idx}'''
# For layernorm(s), simply store the layer norm.
if op_name.endswith('''layernorm'''):
_a = '''ln_1''' if op_name.startswith('''input''') else '''ln_2'''
_a = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
_a = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa)).view(
1 , 1 , _A , _A)
_a = causal_mask
# Insert a "dummy" tensor for masked_bias.
_a = torch.tensor(-1e4 , dtype=torch.floataa)
_a = masked_bias
_a = fix_query_key_value_ordering(_A , _A , 3 , _A , _A)
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
_a = out_val.transpose(0 , 1).contiguous()
# Store.
_a = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
_a = fix_query_key_value_ordering(_A , _A , 3 , _A , _A)
# Store. No change of shape.
_a = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
_a = megatron_to_transformers[op_name]
_a = val.transpose(0 , 1)
# Copy the bias.
elif weight_or_bias == "bias":
_a = megatron_to_transformers[op_name]
_a = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
_a = transformer['''final_layernorm.weight''']
_a = transformer['''final_layernorm.bias''']
    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
_a = word_embeddings
# It should be done!
return output_state_dict
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''')
    parser.add_argument(
        '''path_to_checkpoint''' , type=str , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
    parser.add_argument(
        '''--config_file''' , default='''''' , type=str , help='''An optional config json file describing the pre-trained model.''' , )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''')
    if args.path_to_checkpoint.endswith('''.zip'''):
        with zipfile.ZipFile(args.path_to_checkpoint , '''r''') as checkpoint:
            with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''') as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict , map_location='''cpu''')
    else:
        input_state_dict = torch.load(args.path_to_checkpoint , map_location='''cpu''')
    ds_args = input_state_dict.get('''args''' , None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = '''gelu_fast'''
            elif ds_args.openai_gelu:
                activation_function = '''gelu_new'''
            else:
                activation_function = '''gelu'''
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = '''gelu_new'''
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=activation_function , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , )
    else:
        config = GPTaConfig.from_json_file(args.config_file)
    config.architectures = ['''GPT2LMHeadModel''']
    # Convert.
    print('''Converting''')
    output_state_dict = convert_megatron_checkpoint(args , input_state_dict , config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None , output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = '''gpt2'''
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''')
    else:
        tokenizer_model_name = '''gpt2'''
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('''Saving config''')
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(F'''Adding {tokenizer_class} tokenizer files''')
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename , '''pytorch_model.bin''')
    print(F'''Saving checkpoint to \"{output_checkpoint_file}\"''')
    torch.save(output_state_dict , output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
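# Hedged usage note (added): the converter is driven from the command line;
# given the argparse definitions in main() above, a typical invocation is
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       path/to/release/mp_rank_00/model_optim_rng.pt
# (the script filename and checkpoint path are assumptions for illustration).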
| 211 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowercase = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
__lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
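# Hedged sketch (added): the _LazyModule above defers heavy imports until an
# attribute is first accessed; a minimal stand-in with importlib shows the
# same mechanism without the transformers-specific machinery.
import importlib

class _LazyProxy:
    def __init__(self, name):
        self._name = name
        self._module = None
    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._name)
        return getattr(self._module, attr)

_math = _LazyProxy('''math''')
print(_math.sqrt(9.0))  # the real import happens here, on first access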
| 272 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowerCamelCase : List[Any] = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowerCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 358 |
# Use depth-first search to build an Eulerian path / circuit traversal.
def dfs(u , graph , visited_edge , path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v] , visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path)
    return path
# 1 -> Euler circuit, 2 -> Euler path, 3 -> neither.
def check_circuit_or_path(graph , max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph , max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check , odd_node = check_circuit_or_path(graph , max_node)
    if check == 3:
        print("""graph is not Eulerian""")
        print("""no path""")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("""graph has a Euler path""")
    if check == 1:
        print("""graph has a Euler cycle""")
    path = dfs(start_node , graph , visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1 , max_node)
    check_euler(g2 , max_node)
    check_euler(g3 , max_node)
    check_euler(g4 , max_node)
    check_euler(g5 , max_node)
if __name__ == "__main__":
main()
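    # Hedged check (added): degree parity decides the Euler property; g1 above
    # has exactly two odd-degree vertices (1 and 5), so an Euler path exists.
    assert check_circuit_or_path({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} , 10) == (2, 5)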
| 176 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class A ( _UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase = 'switch_transformers'
lowerCamelCase = ['past_key_values']
lowerCamelCase = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : Dict,lowercase_ : Optional[int]=3_2_1_2_8,lowercase_ : List[Any]=7_6_8,lowercase_ : Union[str, Any]=6_4,lowercase_ : Union[str, Any]=2_0_4_8,lowercase_ : Dict=6_4,lowercase_ : List[Any]=1_2,lowercase_ : Optional[int]=3,lowercase_ : List[Any]=1_2,lowercase_ : Dict=3,lowercase_ : Any=1_2,lowercase_ : Optional[int]=8,lowercase_ : str=False,lowercase_ : Dict=0.01,lowercase_ : Optional[Any]="float32",lowercase_ : Any=False,lowercase_ : str=3_2,lowercase_ : List[Any]=1_2_8,lowercase_ : int=0.1,lowercase_ : Union[str, Any]=1E-6,lowercase_ : Dict=0.001,lowercase_ : List[Any]=0.001,lowercase_ : Dict=1.0,lowercase_ : Optional[int]="relu",lowercase_ : Dict=True,lowercase_ : Union[str, Any]=False,lowercase_ : Union[str, Any]=True,lowercase_ : List[str]=0,lowercase_ : int=1,**lowercase_ : Union[str, Any],)-> Tuple:
'''simple docstring'''
A__ = vocab_size
A__ = d_model
A__ = d_kv
A__ = d_ff
A__ = num_sparse_encoder_layers
A__ = num_layers
A__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A__ = num_sparse_decoder_layers
        # This tells us after how many encoder layers a sparse (expert) layer is inserted.
if self.num_sparse_encoder_layers > 0:
A__ = self.num_layers // self.num_sparse_encoder_layers
else:
A__ = self.num_layers # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers a sparse (expert) layer is inserted.
if self.num_sparse_decoder_layers > 0:
A__ = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
A__ = self.num_decoder_layers # HACK: this will create 0 sparse layers
A__ = num_heads
A__ = num_experts
A__ = expert_capacity
A__ = router_bias
A__ = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
A__ = router_dtype
A__ = router_ignore_padding_tokens
A__ = relative_attention_num_buckets
A__ = relative_attention_max_distance
A__ = dropout_rate
A__ = layer_norm_epsilon
A__ = initializer_factor
A__ = feed_forward_proj
A__ = use_cache
A__ = add_router_probs
A__ = router_z_loss_coef
A__ = router_aux_loss_coef
A__ = self.feed_forward_proj.split('-' )
A__ = act_info[-1]
A__ = act_info[0] == 'gated'
if len(lowercase_ ) > 1 and act_info[0] != "gated" or len(lowercase_ ) > 2:
raise ValueError(
F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A__ = 'gelu_new'
super().__init__(
pad_token_id=lowercase_,eos_token_id=lowercase_,is_encoder_decoder=lowercase_,**lowercase_,)
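# Hedged arithmetic check (added): the sparse-step bookkeeping above means
# "one sparse (expert) layer every k dense layers"; with the signature
# defaults (12 layers, 3 sparse encoder layers) k works out to 4.
_num_layers, _num_sparse = 12, 3
print(_num_layers // _num_sparse if _num_sparse > 0 else _num_layers)  # 4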
| 7 |
from typing import Dict
from .base import GenericTensor, Pipeline
class A ( _UpperCAmelCase ):
"""simple docstring"""
    def _sanitize_parameters( self , truncation=None , tokenize_kwargs=None , return_tensors=None , **kwargs ):
        '''simple docstring'''
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs['''truncation'''] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params['''return_tensors'''] = return_tensors
        return preprocess_params, {}, postprocess_params
    def preprocess( self , inputs , **tokenize_kwargs )-> Dict[str, GenericTensor]:
        '''simple docstring'''
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , return_tensors=False ):
        '''simple docstring'''
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()
    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return super().__call__(*args , **kwargs )
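# Hedged usage sketch (added; downloads a model, so kept as comments): the
# pipeline factory exposes this class under the "feature-extraction" task.
# from transformers import pipeline
# extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
# features = extractor("Hello world")     # nested lists: [batch][token][hidden]
# print(len(features[0]), len(features[0][0]))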
| 7 | 1 |
"""simple docstring"""
def _A ( arr ):
    """simple docstring"""
    n = len(arr )
    s = sum(arr )
    dp = [[False for x in range(s + 1 )] for y in range(n + 1 )]
    for i in range(1 , n + 1 ):
        dp[i][0] = True
    for i in range(1 , s + 1 ):
        dp[0][i] = False
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2 ) , -1 , -1 ):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
return diff | 215 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __A :
"""simple docstring"""
__lowerCAmelCase = 42
    # settable values
__lowerCAmelCase = 42
__lowerCAmelCase = 42
__lowerCAmelCase = None
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A , __A , __A ) -> List[str]:
return cls(common=__A , init_noise_sigma=__A , timesteps=__A )
@dataclass
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = 42
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCAmelCase = 42
@property
def SCREAMING_SNAKE_CASE ( self ) -> str:
return True
@register_to_config
def __init__( self , __A = 1000 , __A = 0.0_001 , __A = 0.02 , __A = "linear" , __A = None , __A = "fixed_small" , __A = True , __A = "epsilon" , __A = jnp.floataa , ) -> List[Any]:
a =dtype
def SCREAMING_SNAKE_CASE ( self , __A = None ) -> DDPMSchedulerState:
if common is None:
a =CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
a =jnp.array(1.0 , dtype=self.dtype )
a =jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=__A , init_noise_sigma=__A , timesteps=__A , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None ) -> jnp.ndarray:
return sample
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = () ) -> DDPMSchedulerState:
a =self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
a =(jnp.arange(0 , __A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=__A , timesteps=__A , )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=None , __A=None ) -> str:
a =state.common.alphas_cumprod[t]
a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
a =(1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
a =self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
a =jnp.clip(__A , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
a =jnp.log(jnp.clip(__A , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
a =state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
a =jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
a =variance
a =state.common.betas[t]
a =(predicted_variance + 1) / 2
a =frac * max_log + (1 - frac) * min_log
return variance
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , __A = None , __A = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
a =timestep
if key is None:
a =jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
a , a =jnp.split(__A , sample.shape[1] , axis=1 )
else:
a =None
# 1. compute alphas, betas
a =state.common.alphas_cumprod[t]
a =jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
a =1 - alpha_prod_t
a =1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
a =(sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
a =model_output
elif self.config.prediction_type == "v_prediction":
a =(alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                '''or `v_prediction` for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
a =jnp.clip(__A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a =(alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
a =state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
a =pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
a =jax.random.split(__A , num=1 )
a =jax.random.normal(__A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(__A , __A , predicted_variance=__A ) ** 0.5) * noise
a =jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
a =pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=__A , state=__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , ) -> jnp.ndarray:
return add_noise_common(state.common , __A , __A , __A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A , ) -> jnp.ndarray:
return get_velocity_common(state.common , __A , __A , __A )
def __len__( self ) -> Optional[int]:
return self.config.num_train_timesteps | 215 | 1 |
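# Hedged numeric sketch (added): the "fixed_small" posterior variance above is
# (1 - abar_{t-1}) / (1 - abar_t) * beta_t over the cumulative alpha products
# of the beta schedule; reproduced here for a toy 5-step linear schedule.
_betas = jnp.linspace(1e-4 , 0.02 , 5 )
_alphas_cumprod = jnp.cumprod(1.0 - _betas )
_t = 3
print(float((1 - _alphas_cumprod[_t - 1]) / (1 - _alphas_cumprod[_t]) * _betas[_t] ) )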
from ..utils import DummyObject, requires_backends
class UpperCAmelCase__ ( metaclass=A__ ):
"""simple docstring"""
a = ["keras_nlp"]
def __init__( self : str , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[int] ) -> int:
requires_backends(self , ['''keras_nlp'''] )
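# Hedged sketch (added): the dummy-object pattern raises a helpful error at
# construction time when the optional backend is missing; a minimal stand-in
# makes the mechanism explicit without the transformers helper.
class _NeedsBackend:
    _backends = ['''keras_nlp''']
    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires: {', '.join(self._backends)}" )

try:
    _NeedsBackend()
except ImportError as err:
    print(err )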
| 314 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : str = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_SCREAMING_SNAKE_CASE : Dict = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_SCREAMING_SNAKE_CASE : Optional[int] = {
'''vinai/phobert-base''': 256,
'''vinai/phobert-large''': 256,
}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class UpperCAmelCase__ ( A__ ):
"""simple docstring"""
a = VOCAB_FILES_NAMES
a = PRETRAINED_VOCAB_FILES_MAP
a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[Any]="<s>" , __lowerCamelCase : List[str]="</s>" , __lowerCamelCase : Dict="</s>" , __lowerCamelCase : Dict="<s>" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : Optional[Any]="<pad>" , __lowerCamelCase : Union[str, Any]="<mask>" , **__lowerCamelCase : Optional[int] , ) -> Union[str, Any]:
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , **__lowerCamelCase , )
SCREAMING_SNAKE_CASE__ = vocab_file
SCREAMING_SNAKE_CASE__ = merges_file
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 3
self.add_from_file(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.encoder.items()}
with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
SCREAMING_SNAKE_CASE__ = merges_handle.read().split('''\n''' )[:-1]
SCREAMING_SNAKE_CASE__ = [tuple(merge.split()[:-1] ) for merge in merges]
SCREAMING_SNAKE_CASE__ = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE__ = {}
def lowercase_ ( self : Dict , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowercase_ ( self : Dict ) -> str:
return len(self.encoder )
def lowercase_ ( self : List[Any] ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self : Any , __lowerCamelCase : Any ) -> Any:
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE__ = min(__lowerCamelCase , key=lambda __lowerCamelCase : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = bigram
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = 0
while i < len(__lowerCamelCase ):
try:
SCREAMING_SNAKE_CASE__ = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE__ = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE__ = tuple(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = new_word
if len(__lowerCamelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE__ = get_pairs(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = '''@@ '''.join(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = word[:-4]
SCREAMING_SNAKE_CASE__ = word
return word
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = re.findall(r'''\S+\n?''' , __lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def lowercase_ ( self : str , __lowerCamelCase : Optional[int] ) -> Optional[int]:
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowercase_ ( self : List[Any] , __lowerCamelCase : List[str] ) -> Dict:
return self.decoder.get(__lowerCamelCase , self.unk_token )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = ''' '''.join(__lowerCamelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def lowercase_ ( self : Dict , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ = os.path.join(
__lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.vocab_file , __lowerCamelCase )
if os.path.abspath(self.merges_file ) != os.path.abspath(__lowerCamelCase ):
copyfile(self.merges_file , __lowerCamelCase )
return out_vocab_file, out_merge_file
    def add_from_file( self , f ):
        if isinstance(f , str ):
            try:
                with open(f , '''r''' , encoding='''utf-8''' ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' )
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(''' ''' )
            if idx == -1:
                raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
            word = line[:idx]
            self.encoder[word] = len(self.encoder )
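# Hedged demo (added): convert_tokens_to_string above joins the pieces with
# spaces and strips the "@@ " continuation markers; shown standalone.
print(''' '''.join(["sinh@@", "viên", "đại@@", "học"] ).replace('''@@ ''' , '''''' ).strip() )
# sinhviên đạihọc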
| 314 | 1 |
from __future__ import annotations
def generate_all_permutations( sequence ):
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence , current_sequence , index , index_used , ):
    """simple docstring"""
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
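# Hedged cross-check (added): itertools is used as a reference oracle for the
# backtracking enumeration above.
from itertools import permutations as _permutations
assert len(list(_permutations([3, 1, 2, 4]))) == 24  # matches the 24 lines printed above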
| 211 |
def solution(n = 4_000_000 ):
    """simple docstring"""
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(f'''{solution() = }''')
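    # Hedged check (added): the known Project Euler #2 answer for the
    # four-million cap.
    assert solution() == 4_613_732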
| 211 | 1 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowercase__ : List[Any] = logging.get_logger(__name__)
lowercase__ : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ : Tuple = {
"vocab_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
},
"merges_file": {
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
},
"tokenizer_file": {
"Salesforce/codegen-350M-mono": (
"https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
),
},
}
lowercase__ : Optional[int] = {
"Salesforce/codegen-350M-mono": 2048,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = CodeGenTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        """simple docstring"""
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs, )
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly.")
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """simple docstring"""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def decode(self, token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, truncate_before_pattern: Optional[List[str]] = None, **kwargs) -> str:
        """simple docstring"""
        decoded_text = super().decode(
            token_ids=token_ids, skip_special_tokens=skip_special_tokens, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """simple docstring"""

        def find_re(pattern, string, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
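# Illustrative use of `decode(..., truncate_before_pattern=...)`; the checkpoint
# name and the regex list below are examples, not required values:
#
#     tok = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tok("def hello():\n    print('hi')\n\nprint(hello())")["input_ids"]
#     text = tok.decode(ids, truncate_before_pattern=[r"\n\n\n", r"^def"])
#     # `text` stops before the second top-level `def`/`print` block.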
| 187 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    '''simple docstring'''
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    '''simple docstring'''
    # A 4-digit base concatenated with its double is base * 100002.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    # A 3-digit base concatenated with its double and triple is base * 1002003.
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f'''{solution() = }''')
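# Spot checks: 918273645 is the concatenated product of 9 with (1,2,3,4,5)
# quoted in the problem statement; a value containing a 0 is rejected.
assert is_9_pandigital(918273645)
assert not is_9_pandigital(123456780)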
| 187 | 1 |
def solution(limit: int = 10_00) -> int:
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
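# A constant-time alternative using arithmetic-series sums with
# inclusion-exclusion; it agrees with the brute-force version above.
def solution_closed_form(limit: int = 10_00) -> int:
    def multiples_sum(k: int) -> int:
        m = (limit - 1) // k
        return k * m * (m + 1) // 2

    return multiples_sum(3) + multiples_sum(5) - multiples_sum(15)


assert solution_closed_form(10) == solution(10) == 23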
| 350 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    # Side length 3 already has 3 primes (3, 5, 7) among its 5 diagonal values.
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        # The three non-square corners of the next layer are j*j + k*(j + 1), k = 1..3.
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
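# Small regression check for the primality helper above.
assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]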
| 102 | 0 |
def solution() -> str:
    total = 0
    for i in range(1, 10_01):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
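# An equivalent computation that never materialises the huge exact sum:
# modular exponentiation with modulus 10**10 keeps only the last ten digits.
def solution_modular(limit: int = 10_00) -> str:
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, limit + 1)) % mod).zfill(10)


assert solution_modular() == solution()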
| 13 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''

    def __init__(self, vocab_file, do_lower_case=False, remove_space=True, keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>", pad_token="<pad>", cls_token="<cls>", mask_token="<mask>", additional_special_tokens=["<eop>", "<eod>"], sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> list:
        """Tokenize a string with SentencePiece, re-splitting digit/comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def _decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=None, spaces_between_special_tokens=True, **kwargs) -> str:
        self._decode_use_source_tokenizer = kwargs.pop('use_source_tokenizer', False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = ''.join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 137 | 0 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    '''simple docstring'''
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    '''simple docstring'''

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    '''simple docstring'''
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        '''simple docstring'''
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        '''simple docstring'''
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place (key, val) in the bucket at `ind`; return True on success."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        '''simple docstring'''
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        '''simple docstring'''
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        '''simple docstring'''
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        '''simple docstring'''
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        '''simple docstring'''
        val_string = ''' ,'''.join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item)
        return f"""HashMap({val_string})"""
| 334 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["""input_features""", """attention_mask"""]
    def __init__(self, feature_size=80, sampling_rate=1_60_00, num_mel_bins=80, padding_value=0.0, do_ceptral_normalize=True, normalize_means=True, normalize_vars=True, **kwargs) -> None:
        '''simple docstring'''
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        '''simple docstring'''
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x: np.ndarray, input_length: int, normalize_means: bool = True, normalize_vars: bool = True, padding_value: float = 0.0) -> np.ndarray:
        '''simple docstring'''
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(self, input_features, attention_mask=None) -> list:
        '''simple docstring'''
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(self, raw_speech, padding=False, max_length=None, truncation=False, pad_to_multiple_of=None, return_tensors=None, sampling_rate=None, return_attention_mask=None, **kwargs) -> BatchFeature:
        '''simple docstring'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=True, **kwargs, )

        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
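# Minimal usage sketch with synthetic audio (relies on torchaudio, imported
# above): a one-second 440 Hz sine wave at 16 kHz stands in for real speech.
if __name__ == "__main__":
    wave = np.sin(2 * np.pi * 440 * np.arange(16000) / 16000).astype(np.float32)
    extractor = Speech2TextFeatureExtractor()
    batch = extractor(wave, sampling_rate=16000, return_tensors="np")
    print(batch["input_features"].shape)  # (1, frames, num_mel_bins)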
| 334 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 293 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(len(vocab_keys), 1_0_0_8)

    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1_0_0_8)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]], )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        '''simple docstring'''
        return XGLMTokenizer.from_pretrained('facebook/xglm-564M')

    def test_picklable_without_disk(self):
        '''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        '''simple docstring'''
        symbols = 'Hello World!'
        original_tokenizer_encodings = [2, 3_1_2_2_7, 4_4_4_7, 3_5]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) )
@slow
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = {
'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
| 293 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, act_dim=6, state_dim=17, hidden_size=23, max_length=11, is_training=True) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        '''simple docstring'''
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))

        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        '''simple docstring'''
        return DecisionTransformerConfig(
            batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask) -> None:
        '''simple docstring'''
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq length *3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            'states': states,
            'actions': actions,
            'rewards': rewards,
            'returns_to_go': returns_to_go,
            'timesteps': timesteps,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {'feature-extraction': DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False
    def setUp(self) -> None:
        '''simple docstring'''
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                'states',
                'actions',
                'rewards',
                'returns_to_go',
                'timesteps',
                'attention_mask',
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
@slow
def _UpperCAmelCase ( self ) -> int:
'''simple docstring'''
lowercase_ : List[Any] = 2 # number of steps of autoregressive prediction we will perform
lowercase_ : Any = 10 # defined by the RL environment, may be normalized
lowercase_ : Union[str, Any] = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert' )
lowercase_ : List[str] = model.to(__UpperCamelCase )
lowercase_ : Any = model.config
torch.manual_seed(0 )
lowercase_ : int = torch.randn(1 ,1 ,config.state_dim ).to(device=__UpperCamelCase ,dtype=torch.floataa ) # env.reset()
lowercase_ : List[Any] = torch.tensor(
[[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] ,device=__UpperCamelCase )
lowercase_ : Any = torch.tensor(__UpperCamelCase ,device=__UpperCamelCase ,dtype=torch.floataa ).reshape(1 ,1 ,1 )
lowercase_ : str = state
lowercase_ : Dict = torch.zeros(1 ,0 ,config.act_dim ,device=__UpperCamelCase ,dtype=torch.floataa )
lowercase_ : Any = torch.zeros(1 ,0 ,device=__UpperCamelCase ,dtype=torch.floataa )
lowercase_ : Union[str, Any] = torch.tensor(0 ,device=__UpperCamelCase ,dtype=torch.long ).reshape(1 ,1 )
for step in range(__UpperCamelCase ):
lowercase_ : Union[str, Any] = torch.cat([actions, torch.zeros(1 ,1 ,config.act_dim ,device=__UpperCamelCase )] ,dim=1 )
lowercase_ : Tuple = torch.cat([rewards, torch.zeros(1 ,1 ,device=__UpperCamelCase )] ,dim=1 )
lowercase_ : List[Any] = torch.ones(1 ,states.shape[1] ).to(dtype=torch.long ,device=states.device )
with torch.no_grad():
lowercase_ , lowercase_ , lowercase_ : Tuple = model(
states=__UpperCamelCase ,actions=__UpperCamelCase ,rewards=__UpperCamelCase ,returns_to_go=__UpperCamelCase ,timesteps=__UpperCamelCase ,attention_mask=__UpperCamelCase ,return_dict=__UpperCamelCase ,)
self.assertEqual(action_pred.shape ,actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] ,expected_outputs[step] ,atol=1e-4 ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = ( # env.step(action)
torch.randn(1 ,1 ,config.state_dim ).to(device=__UpperCamelCase ,dtype=torch.floataa ),
1.0,
False,
{},
)
lowercase_ : Optional[int] = action_pred[0, -1]
lowercase_ : Optional[Any] = torch.cat([states, state] ,dim=1 )
lowercase_ : str = returns_to_go[0, -1] - reward
lowercase_ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 ,1 ,1 )] ,dim=1 )
lowercase_ : int = torch.cat(
[timesteps, torch.ones((1, 1) ,device=__UpperCamelCase ,dtype=torch.long ) * (step + 1)] ,dim=1 )
| 321 |
"""simple docstring"""
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
| 321 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""")

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = """"""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
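# Worked example: p(x) = 3x^2 + 2x + 1, so p(2) = 12 + 4 + 1 = 17 and
# the derivative is p'(x) = 6x + 2.
p = Polynomial(2, [1, 2, 3])
assert p.evaluate(2) == 17
assert str(p.derivative()) == "6x + 2"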
| 38 |
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if a random draw falls below `probability`
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
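# Reproducible example: seed the RNG, then sample a 5-vertex undirected graph
# with edge probability 0.5; `complete_graph` is deterministic.
random.seed(1)
print(random_graph(5, 0.5))  # adjacency-list dict for the sampled graph
assert complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}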
| 71 | 0 |
def gray_code(bit_count: int) -> list:
    # bit count represents the number of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one.
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence; 1 << n equals 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # prepend 0 to the first half of the smaller sequence
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # prepend 1 to the second half, starting from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
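# Worked example: the 2-bit Gray sequence is 00, 01, 11, 10; adjacent values
# (including the wrap-around) differ in exactly one bit.
assert gray_code_sequence_string(2) == ["00", "01", "11", "10"]
assert gray_code(2) == [0, 1, 3, 2]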
| 365 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 36 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
_UpperCamelCase: Optional[int] = "us-east-1" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5_500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1_000}
    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f'''{self.framework}-transformers-test'''

    @property
    def test_path(self) -> str:
        return f'''./tests/sagemaker/scripts/{self.framework}'''

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
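# Illustration of how SageMaker consumes the metric definitions above: each
# "Regex" is applied to the training logs line by line and the first capture
# group becomes the reported metric value. The log line below is made up.
import re

log_line = "train_runtime = 123.4"
match = re.search(r"train_runtime.*=\D*(.*?)$", log_line)
assert match is not None and float(match.group(1)) == 123.4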
| 255 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : int= logging.get_logger(__name__)
_a : Optional[Any]= {
"SCUT-DLVCLab/lilt-roberta-en-base": (
"https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
),
}
class LiltConfig(PretrainedConfig):
    model_type = """lilt"""

    def __init__(self, vocab_size=3_05_22, hidden_size=7_68, num_hidden_layers=12, num_attention_heads=12, intermediate_size=30_72, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_12, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=10_24, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
| 172 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    '''simple docstring'''
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']

    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']

    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'encoder.down' in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.')[:3]) for layer in vae_state_dict if 'decoder.up' in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]

        if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.weight')
            new_checkpoint[f'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                f'encoder.down.{i}.downsample.conv.bias')

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': f'down.{i}.block', '''new''': f'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
        ]

        if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[f'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                f'decoder.up.{block_id}.upsample.conv.bias'
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': f'up.{block_id}.block', '''new''': f'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    '''simple docstring'''
    # Only support V1: fetch the reference Stable Diffusion inference config.
    r = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml')
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('safetensors'):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework='pt', device='cpu') as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)['''state_dict''']

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to write the converted VAE to.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
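# Example invocation, assuming this script is saved as
# convert_vae_pt_to_diffusers.py (file name and paths are placeholders):
#
#     python convert_vae_pt_to_diffusers.py \
#         --vae_pt_path ./vae.pt \
#         --dump_path ./converted-vae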
| 356 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 282 | 0 |
"""simple docstring"""
def solution() -> str:
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
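# A modular-arithmetic variant (an illustrative sketch, not part of the original
# solution): pow(i, i, mod) keeps every intermediate value below 10**10, so the
# full big integer never has to be built. zfill guards against leading zeros.
def solution_mod() -> str:
    mod = 10**10
    return str(sum(pow(i , i , mod ) for i in range(1 , 1001 ) ) % mod ).zfill(10 )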
| 290 |
import os
from math import log10
def solution( data_file: str = "base_exp.txt" ) -> int:
    '''simple docstring'''
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ) , data_file ) ) ):
        a , x = list(map(int , line.split(',') ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
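# Expected format of base_exp.txt (one "base,exponent" pair per line), e.g.:
#   519432,525806
# solution() returns the 1-indexed line number holding the largest base**exponent,
# comparing exponent * log10(base) instead of the astronomically large powers.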
| 176 | 0 |
"""simple docstring"""
def binary_insertion_sort( collection: list ) -> list:
    '''Sort a mutable ordered collection using binary insertion sort.'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of `val` within collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift the larger elements one slot to the right, then insert `val`.
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
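# Illustrative examples (values chosen arbitrarily):
#   >>> binary_insertion_sort([5, 2, 9, 1])
#   [1, 2, 5, 9]
#   >>> binary_insertion_sort([])
#   []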
| 53 |
"""simple docstring"""
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # reduce the amount of console output from TensorFlow
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
print('Torch version:', None)
try:
import deepspeed
print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
print('DeepSpeed version:', None)
try:
import tensorflow as tf
print('TensorFlow version:', tf.__version__)
print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
print('TensorFlow version:', None)
| 53 | 1 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 1_1_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        '''simple docstring'''
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        '''simple docstring'''
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1E-2
        assert abs(result_mean.item() - 0.01_31 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.00_02 ) < 1E-2
        assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
    def test_full_loop_device( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.08_07 ) < 1E-2
        assert abs(result_mean.item() - 0.01_31 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
        assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
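def _denoise_with_euler(model , sample , num_inference_steps=10 , seed=0 ):
    # Hedged sketch (not part of the original test suite): the standard Euler
    # discrete sampling loop for any callable `model(sample, t)`, mirroring the
    # tests above but usable outside the SchedulerCommonTest harness.
    scheduler = EulerDiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps )
    generator = torch.manual_seed(seed )
    sample = sample * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample , t )
        noise_pred = model(model_input , t )
        sample = scheduler.step(noise_pred , t , sample , generator=generator ).prev_sample
    return sample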
| 293 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(" " ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="max_length" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        """simple docstring"""
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + ".source" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + ".target" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix if prefix is not None else ""
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__( self : List[Any] ):
"""simple docstring"""
return len(self.src_lens )
    def __getitem__( self , index ):
        """simple docstring"""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("\n" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("\n" )
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , "right" )
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , "right" )
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens( data_file ):
        """simple docstring"""
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ):
        """simple docstring"""
        input_ids = torch.stack([x["input_ids"] for x in batch] )
        masks = torch.stack([x["attention_mask"] for x in batch] )
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( summary_ids ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info( folder_path ):
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , "git_log.json" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , "w" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f , x ):
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save( obj , path ):
    '''simple docstring'''
    with open(path , "wb" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(R"\b(a|an|the)\b" , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns , reference_lns ):
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith("rag" )
def set_extra_model_params( extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("config doesn't have a `{}` attribute".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
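# Worked example for the metrics above (articles are stripped by normalize_answer):
#   >>> f1_score("the cat sat", "a cat sat down")
#   0.8
#   >>> exact_match_score("The Cat!", "the cat")
#   True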
| 187 | 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout))
p.start()
p.join(timeout=timeout + 1)
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''')
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
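# Example (hypothetical program string, shown for illustration only):
#   outcome = check_correctness("assert 1 + 1 == 2", timeout=3.0, task_id="demo/0", completion_id=0)
#   outcome["passed"]  # True when the program runs to completion without error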
def unsafe_execute( check_program , result , timeout ):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program , exec_globals)
            result.append('''passed''')
        except TimeoutException:
            result.append('''timed out''')
        except BaseException as e:
            result.append(F"""failed: {e}""")
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    def signal_handler(signum , frame ):
        raise TimeoutException('''Timed out!''')
    signal.setitimer(signal.ITIMER_REAL , seconds)
    signal.signal(signal.SIGALRM , signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0)
@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield
@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException( Exception ):
    '''simple docstring'''
    pass
class WriteOnlyStringIO( io.StringIO ):
    '''StringIO that throws an exception when it is read from.'''
    def read( self , *args , **kwargs ):
        raise OSError
    def readline( self , *args , **kwargs ):
        raise OSError
    def readlines( self , *args , **kwargs ):
        raise OSError
    def readable( self , *args , **kwargs ):
        return False
class redirect_stdin( contextlib._RedirectStream ):  # type: ignore
    '''simple docstring'''
    _stream = '''stdin'''
@contextlib.contextmanager
def chdir( root ):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard( maximum_memory_bytes=None ):
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes))
    faulthandler.disable()
    import builtins
    builtins.exit = None
    builtins.quit = None
    import os
    os.environ['''OMP_NUM_THREADS'''] = '''1'''
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None
    import shutil
    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None
    import subprocess
    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None
    import sys
    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
| 188 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    '''simple docstring'''
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline( Pipeline ):
    '''simple docstring'''
    XL_PREFIX = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params , forward_params , _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs['''input_ids'''].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"""{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"""
                    ''' [None, \'hole\']''' )
            preprocess_params["handle_long_generation"] = handle_long_generation
        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
            if return_tensors is not None:
                raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
                    ''' the stop sequence will be used as the stop sequence string in the interim.''' )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'''add_space_before_punct_symbol''': True} )
        return super()._parse_and_tokenize(*args , **kwargs )
    def __call__( self , text_inputs , **kwargs ):
        return super().__call__(text_inputs , **kwargs )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase="" , _UpperCAmelCase=None , **_UpperCAmelCase ):
__a : Tuple = self.tokenizer(
prefix + prompt_text , padding=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=self.framework )
__a : int = prompt_text
if handle_long_generation == "hole":
__a : str = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
__a : Tuple = generate_kwargs['''max_new_tokens''']
else:
__a : List[Any] = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__a : Dict = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation the number of desired tokens exceeds the'''
''' models max length''' )
__a : int = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
__a : List[str] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        input_ids = model_inputs['''input_ids''']
        attention_mask = model_inputs.get('''attention_mask''' , None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('''prompt_text''' )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('''prefix_length''' , 0 )
        if prefix_length > 0:
            has_max_new_tokens = '''max_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = '''min_new_tokens''' in generate_kwargs or (
                '''generation_config''' in generate_kwargs
                and generate_kwargs['''generation_config'''].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        generated_sequence = model_outputs['''generated_sequence'''][0]
        input_ids = model_outputs['''input_ids''']
        prompt_text = model_outputs['''prompt_text''']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'''generated_token_ids''': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ) )
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'''generated_text''': all_text}
            records.append(record )
        return records
| 188 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_small_integration_test( self ):
        model = FlaxMT5ForConditionalGeneration.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 127 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = 'mobilenet_v2'
    def __init__( self , num_channels=3 , image_size=2_2_4 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('1.11' )
    @property
    def inputs( self ):
        return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
    @property
    def outputs( self ):
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})] )
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
    @property
    def atol_for_validation( self ):
        return 1E-4
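# Hedged usage sketch (defaults mirror the __init__ signature above):
#   config = MobileNetV2Config(depth_multiplier=1.0)
#   onnx_config = MobileNetV2OnnxConfig(config)
#   list(onnx_config.inputs)  # -> ['pixel_values']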
| 127 | 1 |
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever( RagRetriever ):
    '''simple docstring'''
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ):
        logger.info('''initializing retrieval''' )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('''dist initialized''' )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend='''gloo''' )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('''dist not initialized / main''' )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith('''e''' )) , None )
        return ifname
    def retrieve( self , question_hidden_states , n_docs ):
        # single GPU training
        if not dist.is_initialized():
            doc_ids , retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids , vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids , vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
| 11 |
"""simple docstring"""
from math import factorial, radians
def sin( angle_in_degrees: float , accuracy: int = 18 , rounded_values_count: int = 10 ) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ):
    '''simple docstring'''
    fout = Path(out_file ).open("w" , encoding="utf-8" )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , "prefix" , "" ) or ""
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors="pt" , truncation=True , padding="longest" ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + "\n" )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    '''simple docstring'''
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def run_generate( verbose=True ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
parser.add_argument("model_name" , type=SCREAMING_SNAKE_CASE_ , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=SCREAMING_SNAKE_CASE_ , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=SCREAMING_SNAKE_CASE_ , help="where to save summaries" )
parser.add_argument("--reference_path" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=SCREAMING_SNAKE_CASE_ , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=SCREAMING_SNAKE_CASE_ , default=8 , required=SCREAMING_SNAKE_CASE_ , help="batch size" )
parser.add_argument(
"--n_obs" , type=SCREAMING_SNAKE_CASE_ , default=-1 , required=SCREAMING_SNAKE_CASE_ , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=SCREAMING_SNAKE_CASE_ , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu" )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores: dict = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , "w" ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
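# Example invocation (hypothetical paths, shown for illustration only):
#   python run_eval.py t5-base cnn_dm/test.source preds.txt \
#       --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 16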
| 283 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    @slow
    def test_sequence_builders( self ):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 283 | 1 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
        tokens = tokenizer(**self.metas )['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ):
        '''simple docstring'''
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
        tokens = tokenizer(**self.metas )['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 0 |
'''simple docstring'''
import math
def prime_sieve( n: int ) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution( limit: int = 99_99_66_66_33_33 ) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_00
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
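# Context (Project Euler 234): with lps(n) the largest prime <= sqrt(n) and ups(n)
# the smallest prime >= sqrt(n), n is "semidivisible" when exactly one of lps(n)
# and ups(n) divides n; solution() sums all semidivisible n up to the limit.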
| 0 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
    def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs, ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False ) if isinstance(mask_token, str ) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def UpperCamelCase_ ( self: Optional[Any], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_snake_case : Optional[int] = [self.cls_token_id]
_snake_case : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase_ ( self: Tuple, a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : str = [self.sep_token_id]
_snake_case : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self: List[Any], a_: str, a_: Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_snake_case : int = os.path.join(
a_, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file, a_ )
return (out_vocab_file,)
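
# Illustrative usage (an assumption, not part of the original module): loading the
# fast tokenizer from the Hub requires network access to the public
# `moussaKam/barthez` checkpoint listed above.
#
#     from transformers import BarthezTokenizerFast
#
#     tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     encoded = tokenizer("Les transformers, c'est genial !", return_tensors="pt")
#     print(encoded["input_ids"].shape)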
| 64 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223 | 0 |
"""Official evaluation script for SQuAD version 2.0."""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
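
# Illustrative walk-through of compute_f1 (comment only, not part of the official
# script): for a_gold = "the cat sat" and a_pred = "cat sat down", normalization drops
# the article "the", so gold tokens are {cat, sat} and prediction tokens are
# {cat, sat, down}. The overlap is 2, giving precision = 2/3, recall = 2/2 = 1 and
# F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8.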
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 359 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
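
# Illustrative usage (an assumption, not part of the original tool module): the tool
# is normally driven by the transformers agents framework, but the encode/forward/
# decode pipeline can be exercised directly once `setup()` has loaded the public
# SpeechT5 checkpoints.
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     waveform = tool.decode(tool.forward(tool.encode("Hello, world!")))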
| 281 | 0 |
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        """Find all instances where a non-binary file is opened without UTF-8 encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        """Find all instances of `print` used outside of comments and docstrings."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
        matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))

        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 85 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase_ = 'src/diffusers'
UpperCamelCase_ = '.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase_ = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase_ = spec.loader.load_module()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ) ->Dict:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$" , UpperCAmelCase ) is not None
def UpperCamelCase ( UpperCAmelCase ) ->Any:
"""simple docstring"""
a_ = object_name.split("." )
a_ = 0
# First let's find the module where our object lives.
a_ = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , F'''{module}.py''' ) ):
i += 1
if i < len(UpperCAmelCase ):
a_ = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(UpperCAmelCase , F'''{module}.py''' ) , "r" , encoding="utf-8" , newline="\n" ) as f:
a_ = f.readlines()
# Now let's find the class / func in the code!
a_ = ""
a_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a_ = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
return "".join(UpperCAmelCase )
UpperCamelCase_ = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase_ = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase_ = re.compile(R'<FILL\s+[^>]*>')
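
# Example of a comment matched by _re_copy_warning above (illustrative, the object
# name is hypothetical):
#   # Copied from diffusers.models.resnet.ResnetBlock2D with ResnetBlock2D->MyBlock
# group(1) captures the indentation, group(2) the dotted object path after
# `diffusers.`, and group(3) the optional "with old->new" replacement pattern that
# is consumed further below in `is_copy_consistent`.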
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrites the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 243 | 0 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _snake_case ( UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]="pt" ) -> int:
UpperCAmelCase : int = {"""add_prefix_space""": True} if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and not line.startswith(""" """ ) else {}
UpperCAmelCase : Tuple = padding_side
return tokenizer(
[line] , max_length=SCREAMING_SNAKE_CASE_ , padding="""max_length""" if pad_to_max_length else None , truncation=SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[str]=None , ) -> Tuple:
UpperCAmelCase : Tuple = input_ids.ne(SCREAMING_SNAKE_CASE_ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
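
# Shape example for trim_batch (illustrative comment): given pad_token_id = 0 and
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
# the last two columns contain only padding, so they are dropped and the result is
# [[5, 6], [7, 0]]; the attention mask, if given, is trimmed with the same mask.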
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 366 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
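
# Illustrative usage (an assumption, not part of the original module): assuming the
# public `YituTech/conv-bert-base` checkpoint listed above is reachable on the Hub.
#
#     from transformers import ConvBertTokenizerFast
#
#     tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     print(tokenizer("ConvBERT is efficient.")["input_ids"])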
| 76 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = KandinskyVaaControlnetPipeline
A_ : Union[str, Any] = ['image_embeds', 'negative_image_embeds', 'hint']
A_ : Any = ['image_embeds', 'negative_image_embeds', 'hint']
A_ : int = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ : Optional[Any] = False
@property
def _UpperCAmelCase ( self ) -> List[Any]:
return 32
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return 32
@property
def _UpperCAmelCase ( self ) -> str:
return self.time_input_dim
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self ) -> List[str]:
return 100
@property
def _UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def _UpperCAmelCase ( self ) -> Any:
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_a = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.dummy_unet
_a = self.dummy_movq
_a = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__UpperCAmelCase , )
_a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase=0 ) -> List[str]:
_a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
_a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create hint
_a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
if str(__UpperCAmelCase ).startswith('''mps''' ):
_a = torch.manual_seed(__UpperCAmelCase )
else:
_a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_a = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _UpperCAmelCase ( self ) -> Any:
_a = '''cpu'''
_a = self.get_dummy_components()
_a = self.pipeline_class(**__UpperCAmelCase )
_a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
_a = output.images
_a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Any:
_a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
_a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
_a = torch.from_numpy(np.array(__UpperCAmelCase ) ).float() / 255.0
_a = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
_a = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__UpperCAmelCase )
_a = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
_a = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
_a = '''A robot, 4k photo'''
_a = torch.Generator(device='''cuda''' ).manual_seed(0 )
_a , _a = pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a = torch.Generator(device='''cuda''' ).manual_seed(0 )
_a = pipeline(
image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type='''np''' , )
_a = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase ) | 320 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
): #
return True
return False
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
for char in word:
_a = ord(_lowerCAmelCase )
if not _is_chinese_char(_lowerCAmelCase ):
return 0
return 1
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_a = set()
for token in tokens:
_a = len(_lowerCAmelCase ) > 1 and is_chinese(_lowerCAmelCase )
if chinese_word:
word_set.add(_lowerCAmelCase )
_a = list(_lowerCAmelCase )
return word_list
def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a = max([len(_lowerCAmelCase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(_lowerCAmelCase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start, _lowerCAmelCase )
for i in range(_lowerCAmelCase, 1, -1 ):
_a = ''''''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1, start + i ):
_a = '''##''' + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
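
# Illustrative trace of add_sub_symbol (comment only): if the LTP segmenter produced
# the whole word "身高" and BERT tokenized it as ["身", "高"], the loop rewrites the
# second piece to "##高" so that whole-word masking later treats both pieces as a
# single word.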
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()

    main(args)
| 320 | 1 |
"""simple docstring"""
import os
from distutils.util import strtobool
def _lowerCAmelCase ( UpperCAmelCase : Dict , UpperCAmelCase : Dict ):
'''simple docstring'''
for e in env_keys:
UpperCamelCase__ : List[str] =int(os.environ.get(UpperCAmelCase , -1 ) )
if val >= 0:
return val
return default
def _lowerCAmelCase ( UpperCAmelCase : List[Any] , UpperCAmelCase : Any=False ):
'''simple docstring'''
UpperCamelCase__ : List[Any] =os.environ.get(UpperCAmelCase , str(UpperCAmelCase ) )
return strtobool(UpperCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int...
def _lowerCAmelCase ( UpperCAmelCase : str , UpperCAmelCase : str="no" ):
'''simple docstring'''
UpperCamelCase__ : List[str] =os.environ.get(UpperCAmelCase , str(UpperCAmelCase ) )
return value
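
# Illustrative usage (comment only; the variable names are examples): with
# LOCAL_RANK=2 and ACCELERATE_DEBUG_MODE=1 set in the environment,
#   get_int_from_env(["LOCAL_RANK", "MPI_LOCALRANKID"], 0)  -> 2
#   parse_flag_from_env("ACCELERATE_DEBUG_MODE")            -> True
#   parse_choice_from_env("MIXED_PRECISION")                -> "no" (the default)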
| 157 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a :
"""simple docstring"""
def __init__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Any=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : int=True , lowercase_ : List[str]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : Union[str, Any]=99 , lowercase_ : int=32 , lowercase_ : List[Any]=2 , lowercase_ : Optional[int]=4 , lowercase_ : Dict=37 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=512 , lowercase_ : Dict=16 , lowercase_ : Optional[int]=2 , lowercase_ : Optional[int]=0.0_2 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=4 , lowercase_ : Any=None , ):
UpperCamelCase__ : Any =parent
UpperCamelCase__ : Any =13
UpperCamelCase__ : int =7
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Dict =True
UpperCamelCase__ : int =True
UpperCamelCase__ : Tuple =True
UpperCamelCase__ : Any =99
UpperCamelCase__ : Any =32
UpperCamelCase__ : Union[str, Any] =2
UpperCamelCase__ : List[Any] =4
UpperCamelCase__ : Any =37
UpperCamelCase__ : Union[str, Any] ='''gelu'''
UpperCamelCase__ : Dict =0.1
UpperCamelCase__ : int =0.1
UpperCamelCase__ : Union[str, Any] =512
UpperCamelCase__ : Dict =16
UpperCamelCase__ : List[Any] =2
UpperCamelCase__ : str =0.0_2
UpperCamelCase__ : Optional[Any] =3
UpperCamelCase__ : List[str] =4
UpperCamelCase__ : Optional[int] =None
def _lowerCAmelCase ( self : List[Any] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.tolerance)
| 157 | 1 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 270 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i

            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
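# Worked example (traced by hand): with burst_time = [2, 5, 3, 7] and every arrival at 0,
# the shortest job always runs first, so jobs finish in the order P1(2), P3(3), P2(5), P4(7)
# and calculate_waitingtime returns [0, 5, 2, 10] -- exactly what the test case below prints.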
if __name__ == "__main__":
print("[TEST CASE 01]")
SCREAMING_SNAKE_CASE__ : Tuple = 4
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 5, 3, 7]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, 0, 0]
SCREAMING_SNAKE_CASE__ : str = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
| 270 | 1 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
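# Illustrative arithmetic (hypothetical page size): on a 100x200-pixel image, the pixel box
# [10, 20, 50, 100] maps onto the model's 0-1000 coordinate grid as
# normalize_box([10, 20, 50, 100], 100, 200) == [100, 100, 500, 500].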
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 334 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # remove the hooks once the forward pass has been traced
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
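# Minimal usage sketch (illustrative only, not part of the original script): Tracker hooks
# every submodule, runs one forward pass, and keeps only the leaf modules that own weights.
#
#   import torch
#   from torch import nn
#   leaves = Tracker(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()))(torch.randn(1, 3, 8, 8)).parametrized
#   # -> [Conv2d(...)]; the parameter-free ReLU is filtered out by `parametrized`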
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: list = field(default_factory=list)
    dest_skip: list = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase = parser.parse_args()
UpperCamelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 334 | 1 |
import datasets
__A = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"
__A = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"
__A = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
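# e.g. with numpy arrays: simple_accuracy(np.array([0, 1, 1]), np.array([0, 0, 1])) == 2 / 3
# (plain Python lists would not work here, since list equality yields a single bool)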
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
"references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
}) , codebase_urls=[] , reference_urls=[] , format="numpy" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 10 |
'''simple docstring'''
class TrieNode:
    def __init__(self):
        self.nodes = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]):
        for word in words:
            self.insert(word)

    def insert(self, word: str):
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str):
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
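# Note on deletion: _delete prunes nodes bottom-up, and only while the child chain stays
# empty -- removing "banana" in the test below leaves "bananas" intact because the shared
# prefix nodes are still in use.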
def print_words(node: TrieNode, word: str):
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool):
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests():
    assert test_trie()


def main():
    print_results("Testing trie functionality", test_trie())
if __name__ == "__main__":
main()
| 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 250 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : list , __snake_case : list , __snake_case : int ) -> list:
__A : str = len(__snake_case )
__A : Tuple = [[0] * n for i in range(__snake_case )]
for i in range(__snake_case ):
__A : Optional[int] = y_points[i]
for i in range(2 , __snake_case ):
for j in range(__snake_case , __snake_case ):
__A : Union[str, Any] = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
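# Worked check (hand-computed): four samples of y = x**2 pin down the quadratic exactly,
# so evaluating Neville's scheme at x0 = 2.5 recovers 2.5**2:
#   neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 2.5)[0] == 6.25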
if __name__ == "__main__":
import doctest
    doctest.testmod()

| 190 |
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
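# Hedged sanity check (assumes a local qiskit + Aer install): for definite 0/1 inputs the
# dominant measured bitstring reads "carry_out sum", e.g. 1 + 0 + 1 = 0b10:
#   counts = quantum_full_adder(1, 0, 1)
#   assert max(counts, key=counts.get) == "10"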
if __name__ == "__main__":
print(f"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""") | 190 | 1 |
"""simple docstring"""
a = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )
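# Why 1_000_000 is a safe search bound: a 7-digit number is at least 10**6, while the
# largest digit-fifth-power sum for 7 digits is 7 * 9**5 = 413_343, so no number with
# seven or more digits can equal its own sum.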
if __name__ == "__main__":
print(solution())
| 271 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
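# Illustrative label alignment: if example.words = ["Berlin"] tokenizes to ["Ber", "##lin"],
# label_ids keeps the real tag id on "Ber" and marks "##lin" with pad_token_label_id (-100),
# so the cross-entropy loss ignores continuation sub-tokens.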
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))

            return self.dataset
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 271 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
lowerCAmelCase__ = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
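# Flow recap (explanatory note, not part of the original class): each worker ships its
# query batch to rank 0 (dist.gather), rank 0 searches the index once over the
# concatenated batch, then slices the doc ids/embeddings back into per-worker chunks
# and scatters them out, so only one process ever touches the (large) index.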
| 11 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
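# Quick arithmetic note: with the defaults above, image_size=224 and patch_size=16 give
# (224 // 16) ** 2 = 196 patches per image, of which mask_ratio=0.75 hides 147 during
# MAE pretraining.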
| 11 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
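# Recursion trace for sequence = [1, 2]: the exclude-first branches print [], [2], then
# the include branch prints [1], [1, 2] -- every subset appears exactly once.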
if __name__ == "__main__":
__magic_name__ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 353 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"microsoft/xprophetnet-large-wiki100-cased": (
"https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
),
}
class XLMProphetNetConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
raise NotImplementedError(
"""This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
""" `num_decoder_layers`.""")
| 255 | 0 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
"""simple docstring"""
import torch
        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 0 |
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 0 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vivit"] = [
        "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VivitModel",
        "VivitPreTrainedModel",
        "VivitForVideoClassification",
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 252 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_lowerCamelCase : Dict = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """simple docstring"""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """simple docstring"""
    return _calculate(days, absent=0, late=0)
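
# Sanity check added for illustration (not part of the original file): the
# Project Euler 191 statement gives exactly 43 "prize" strings for a 4-day
# period, which this implementation reproduces.
assert solution(4) == 43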
if __name__ == "__main__":
print(solution()) | 187 |
'''simple docstring'''
class OverFlowError(Exception):
    '''simple docstring'''

    pass


class UnderFlowError(Exception):
    '''simple docstring'''

    pass


class FixedPriorityQueue:
    '''simple docstring'''

    def __init__(self) -> None:
        self.queues: list[list[int]] = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    '''simple docstring'''

    def __init__(self) -> None:
        self.queue: list[int] = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
def fixed_priority_queue() -> None:
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue() | 187 | 1 |
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
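

# Note added for clarity (not part of the original module): get_character()
# passes printable characters through unchanged, encodes arrow keys by adding
# ARROW_KEY_FLAG to the raw key code, and returns KEYMAP["undefined"] for
# anything it cannot interpret.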
| 21 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""", type=str, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=False, )
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
    parser.add_argument(
        """--performance_lower_bound""", type=float, default=None, help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""", )
    parser.add_argument(
        """--num_epochs""", type=int, default=3, help="""Number of train epochs.""", )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 38 | 0 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 282 |
import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    '''simple docstring'''
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
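

# Usage sketch added for illustration (not part of the original module).
# `index` counts from the smallest element, so len(items) // 2 picks the
# median of an odd-length list.
if __name__ == "__main__":
    print(quick_select([2, 8, 1, 5, 9], 2))  # -> 5, the median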
| 282 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 229 | '''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        """encoder.version""",
        """decoder.version""",
        """model.encoder.version""",
        """model.decoder.version""",
        """decoder.output_projection.weight""",
        """_float_tensor""",
        """encoder.embed_positions._float_tensor""",
        """decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path, map_location="""cpu""")
    args = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    state_dict = mam_aaa["""model"""]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]

    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""relu""", )

    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 229 | 1 |
"""simple docstring"""
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    '''simple docstring'''
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    '''simple docstring'''
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)
if __name__ == "__main__":
__UpperCAmelCase = input('Enter integers separated by spaces: ')
__UpperCAmelCase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 1 |
"""simple docstring"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    '''simple docstring'''
    largest: float = 0
    result = 0
    # Compare x * log10(a) instead of a**x so we never have to evaluate the
    # (astronomically large) powers themselves.
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(""",""")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 1 | 1 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """simple docstring"""
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split('\n') for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq])
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """simple docstring"""
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ['N/A']

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """simple docstring"""
    out_pdb_lines = []
    lines = pdb_str.split('\n')

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ['N/A'])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [['N/A']]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """simple docstring"""
    restypes = residue_constants.restypes + ['X']

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], 'UNK')

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''

            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append('END')
    pdb_lines.append('')
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """simple docstring"""
    return Protein(
        aatype=features['aatype'], atom_positions=result['final_atom_positions'], atom_mask=result['final_atom_mask'], residue_index=features['residue_index'] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index, )
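

# Usage sketch added for illustration (not part of the original module): a
# Protein parsed via from_proteinnet_string(...) or assembled from model
# outputs via from_prediction(...) can be serialized back to PDB text with
# to_pdb(prot), optionally followed by add_pdb_headers(prot, pdb_str).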
| 47 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser('''Accelerate CLI tool''', usage='''accelerate <command> [<args>]''', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='''accelerate command helpers''')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main() | 191 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''', revision='''bf16''', dtype=jnp.bfloat16, )

        prompt = '''A painting of a squirrel eating a burger'''
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = '''stabilityai/stable-diffusion-2'''
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder='''scheduler''')
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, revision='''bf16''', dtype=jnp.bfloat16, )
        params['''scheduler'''] = scheduler_params

        prompt = '''A painting of a squirrel eating a burger'''
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f'''output_slice: {output_slice}''')
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 354 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}"
print(fmt.format(*covid_stats()))
| 339 | 0 |
"""simple docstring"""
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path; records predecessors in `parent`."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp variant of the Ford-Fulkerson method on an adjacency-matrix graph."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and reverse edges on the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
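# For the classic CLRS flow network defined above, this prints a maximum flow of 23.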
| 40 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "A robot, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_embeds, negative_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=negative_image_embeds, hint=hint,
            generator=generator, num_inference_steps=100, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 40 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
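# A minimal registration sketch using the transformers PIPELINE_REGISTRY API;
# the task name and model class below are illustrative assumptions, not part of this file:
#
#   from transformers import AutoModelForSequenceClassification
#   from transformers.pipelines import PIPELINE_REGISTRY
#
#   PIPELINE_REGISTRY.register_pipeline(
#       "pair-classification",  # hypothetical task name
#       pipeline_class=PairClassificationPipeline,
#       pt_model=AutoModelForSequenceClassification,
#   )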
| 282 | 0 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer) -> tuple:
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self) -> None:
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 11_03)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 11_03)
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self) -> None:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self) -> None:
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_61_03
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 1_03
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 10_24
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 1_50, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 10_24)
        assert batch.attention_mask.shape == (2, 10_24)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self) -> None:
_UpperCAmelCase : str = {"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415", )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer) -> tuple:
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 10_00, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt")
        assert batch.input_ids.shape == (2, 40_96)
        assert batch.attention_mask.shape == (2, 40_96)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self) -> None:
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
| 246 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((2_56 / 2_24) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[ChannelDimension] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[ChannelDimension] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
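# Minimal usage sketch (the image file name is a placeholder assumption):
#
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   print(batch["pixel_values"].shape)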
| 246 | 1 |
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator flagging a function as experimental, warning on every call."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
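# Usage sketch (the decorated function below is hypothetical, purely for illustration):
#
#   @experimental
#   def new_feature():
#       ...
#
#   new_feature()  # emits a UserWarning before running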
| 60 |
def solution(n: int = 1_0_0) -> int:
    """Returns the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6)."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
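# Sanity check that is easy to do by hand: for n = 10 the sum 1..10 is 55, so the
# square of the sum is 3025; the sum of squares is 385; hence solution(10) == 2640.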
| 60 | 1 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a file's content, ignoring whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by scanning its first few lines for keywords."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a configuration file or a unit test by keyword scan and keyword frequency."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a Python file contains none of: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is not filled per step."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
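# Typical invocation of this script (flag names come from PreprocessingArguments,
# defined in the sibling `arguments` module; the values below are illustrative
# assumptions, not part of this file):
#
#   python preprocessing.py --dataset_name <raw-dataset> --output_dir ./clean-data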
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 15 |
'''simple docstring'''
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
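# For reference, the API under test in one line of plain `datasets` usage:
#
#   ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])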
| 58 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=1_28, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00,
            hidden_act="gelu", projection_dim=5_12, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator,
            num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
| 46 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2_0_4_8
MAX_LENGTH = 4_0_9_6
SEED = 4_2
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    """Gives new context after removing <html> tokens and new answer tokens as per the new context."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10
    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])
    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")
    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    """Splits long contexts into strided windows and maps the answer span into each window."""
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]
    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }
    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False, ).input_ids)
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids)
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]
    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")
    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"], ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # removing 50 % samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    } )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
lowercase_ = load_dataset('natural_questions')
lowercase_ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base')
lowercase_ = data['train' if PROCESS_TRAIN == 'true' else 'validation']
lowercase_ = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
lowercase_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
lowercase_ = data.remove_columns(['annotations', 'document', 'id', 'question'])
print(data)
np.random.seed(SEED)
lowercase_ = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
| 205 |
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __snake_case( _lowerCAmelCase ) -> List[Any]:
snake_case__ : Dict = SwinConfig()
snake_case__ : Optional[Any] = swin_name.split("""_""" )
snake_case__ : Any = name_split[1]
snake_case__ : List[Any] = int(name_split[4] )
snake_case__ : int = int(name_split[3][-1] )
if model_size == "tiny":
snake_case__ : List[Any] = 96
snake_case__ : int = (2, 2, 6, 2)
snake_case__ : int = (3, 6, 12, 24)
elif model_size == "small":
snake_case__ : Union[str, Any] = 96
snake_case__ : Optional[Any] = (2, 2, 18, 2)
snake_case__ : str = (3, 6, 12, 24)
elif model_size == "base":
snake_case__ : Dict = 128
snake_case__ : str = (2, 2, 18, 2)
snake_case__ : Dict = (4, 8, 16, 32)
else:
snake_case__ : List[str] = 192
snake_case__ : str = (2, 2, 18, 2)
snake_case__ : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
snake_case__ : str = 21_841
else:
snake_case__ : List[str] = 1_000
snake_case__ : int = """huggingface/label-files"""
snake_case__ : Any = """imagenet-1k-id2label.json"""
snake_case__ : List[Any] = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
snake_case__ : Dict = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
snake_case__ : Optional[int] = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case__ : List[Any] = img_size
snake_case__ : Dict = num_classes
snake_case__ : Dict = embed_dim
snake_case__ : Optional[int] = depths
snake_case__ : int = num_heads
snake_case__ : Optional[int] = window_size
return config
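# For illustration (derived from the branches above, not part of the original script):
# get_swin_config("swin_tiny_patch4_window7_224") yields a SwinConfig with image_size=224,
# window_size=7, embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24) and 1000 labels.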
def __snake_case( _lowerCAmelCase ) -> Dict:
if "patch_embed.proj" in name:
snake_case__ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
snake_case__ : int = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
snake_case__ : str = """encoder.""" + name
if "attn.proj" in name:
snake_case__ : List[str] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
snake_case__ : Tuple = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
snake_case__ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
snake_case__ : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
snake_case__ : Union[str, Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
snake_case__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
snake_case__ : Tuple = """layernorm.weight"""
if name == "norm.bias":
snake_case__ : Union[str, Any] = """layernorm.bias"""
if "head" in name:
snake_case__ : Optional[int] = name.replace("""head""" , """classifier""" )
else:
snake_case__ : List[str] = """swin.""" + name
return name
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
snake_case__ : Optional[int] = orig_state_dict.pop(_lowerCAmelCase )
if "mask" in key:
continue
elif "qkv" in key:
snake_case__ : Dict = key.split(""".""" )
snake_case__ : Optional[int] = int(key_split[1] )
snake_case__ : Union[str, Any] = int(key_split[3] )
snake_case__ : List[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
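            # timm stores query/key/value fused in one "qkv" tensor; the HF model keeps them
            # as separate projections, so the fused tensor is split into thirds below.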
if "weight" in key:
snake_case__ : Optional[Any] = val[:dim, :]
snake_case__ : Tuple = val[
dim : dim * 2, :
]
snake_case__ : Dict = val[-dim:, :]
else:
snake_case__ : Tuple = val[
:dim
]
snake_case__ : int = val[
dim : dim * 2
]
snake_case__ : int = val[
-dim:
]
else:
snake_case__ : Union[str, Any] = val
return orig_state_dict
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> int:
snake_case__ : Optional[int] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase )
timm_model.eval()
snake_case__ : Optional[int] = get_swin_config(_lowerCAmelCase )
snake_case__ : Optional[Any] = SwinForImageClassification(_lowerCAmelCase )
model.eval()
snake_case__ : str = convert_state_dict(timm_model.state_dict() , _lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
snake_case__ : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case__ : Dict = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
snake_case__ : Dict = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
snake_case__ : Optional[int] = image_processor(images=_lowerCAmelCase , return_tensors="""pt""" )
snake_case__ : Optional[Any] = timm_model(inputs["""pixel_values"""] )
snake_case__ : str = model(**_lowerCAmelCase ).logits
assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print(f"Saving model {swin_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swin_name",
default="swin_tiny_patch4_window7_224",
type=str,
help="Name of the Swin timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__a = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 35 | 0 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
lowerCamelCase : Dict = {"target_lang": "fi", "source_lang": "en"}
lowerCamelCase : List[Any] = ">>zh<<"
lowerCamelCase : Tuple = "Helsinki-NLP/"
if is_torch_available():
lowerCamelCase : Optional[Any] = "pt"
elif is_tf_available():
lowerCamelCase : List[str] = "tf"
else:
lowerCamelCase : Any = "jax"
@require_sentencepiece
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = MarianTokenizer
UpperCamelCase = False
UpperCamelCase = True
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
super().setUp()
lowerCamelCase_ = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
lowerCamelCase_ = dict(zip(A_ , range(len(A_ ) ) ) )
lowerCamelCase_ = Path(self.tmpdirname )
save_json(A_ , save_dir / VOCAB_FILES_NAMES['vocab'] )
save_json(A_ , save_dir / VOCAB_FILES_NAMES['tokenizer_config_file'] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(A_ , save_dir / VOCAB_FILES_NAMES['source_spm'] )
copyfile(A_ , save_dir / VOCAB_FILES_NAMES['target_spm'] )
lowerCamelCase_ = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : Any , **A_ : Optional[Any] ) -> MarianTokenizer:
"""simple docstring"""
return MarianTokenizer.from_pretrained(self.tmpdirname , **A_ )
def a__ ( self : Dict , A_ : Dict ) -> Optional[int]:
"""simple docstring"""
return (
"This is a test",
"This is a test",
)
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = '</s>'
lowerCamelCase_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) , A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) , A_ )
def a__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '</s>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '<pad>' )
self.assertEqual(len(A_ ) , 9 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def a__ ( self : int ) -> int:
"""simple docstring"""
lowerCamelCase_ = MarianTokenizer.from_pretrained(f"""{ORG_NAME}opus-mt-en-de""" )
lowerCamelCase_ = en_de_tokenizer(['I am a small frog'] , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
lowerCamelCase_ = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(A_ , batch.input_ids[0] )
lowerCamelCase_ = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(A_ )
lowerCamelCase_ = [x.name for x in Path(A_ ).glob('*' )]
self.assertIn('source.spm' , A_ )
MarianTokenizer.from_pretrained(A_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = tok(
['I am a small frog' * 1000, 'I am a small frog'] , padding=A_ , truncation=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def a__ ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = tok(['I am a tiny frog', 'I am a small frog'] , padding=A_ , return_tensors=A_ )
self.assertIsInstance(A_ , A_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def a__ ( self : int ) -> str:
"""simple docstring"""
lowerCamelCase_ = {'input_ids': [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ , model_name='Helsinki-NLP/opus-mt-en-de' , revision='1a8c2263da11e68e50938f97e10cd57820bd504c' , decode_kwargs={'use_source_tokenizer': True} , )
def a__ ( self : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ = MarianTokenizer.from_pretrained('hf-internal-testing/test-marian-two-vocabs' )
lowerCamelCase_ = 'Tämä on testi'
lowerCamelCase_ = 'This is a test'
lowerCamelCase_ = [76, 7, 2047, 2]
lowerCamelCase_ = [69, 12, 11, 940, 2]
lowerCamelCase_ = tokenizer(A_ ).input_ids
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer(text_target=A_ ).input_ids
self.assertListEqual(A_ , A_ )
lowerCamelCase_ = tokenizer.decode(A_ , skip_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 208 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
lowerCamelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase : List[Any] = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
lowerCamelCase : Optional[Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
lowerCamelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
lowerCamelCase : Union[str, Any] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
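# For illustration (examples added by the editor, assuming standard transformers class names):
# _re_tf_models.match("TFBertModel").groups()[0] -> "Bert"
# _re_flax_models.match("FlaxBartForConditionalGeneration").groups()[0] -> "Bart"
# _re_pt_models.match("BertModel").groups()[0] -> "Bert"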
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
lowerCamelCase : Tuple = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def _SCREAMING_SNAKE_CASE ( lowercase : int ):
'''simple docstring'''
lowerCamelCase_ = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowercase )
return [m.group(0 ) for m in matches]
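# For illustration (not in the original script): camel_case_split("TFBertForMaskedLM")
# returns ["TF", "Bert", "For", "Masked", "LM"].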
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
lowerCamelCase_ = {
config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
lowerCamelCase_ = collections.defaultdict(lowercase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(lowercase ):
lowerCamelCase_ = None
if _re_tf_models.match(lowercase ) is not None:
lowerCamelCase_ = tf_models
lowerCamelCase_ = _re_tf_models.match(lowercase ).groups()[0]
elif _re_flax_models.match(lowercase ) is not None:
lowerCamelCase_ = flax_models
lowerCamelCase_ = _re_flax_models.match(lowercase ).groups()[0]
elif _re_pt_models.match(lowercase ) is not None:
lowerCamelCase_ = pt_models
lowerCamelCase_ = _re_pt_models.match(lowercase ).groups()[0]
if lookup_dict is not None:
while len(lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
lowerCamelCase_ = True
break
# Try again after removing the last word in the name
lowerCamelCase_ = ''.join(camel_case_split(lowercase )[:-1] )
lowerCamelCase_ = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
lowerCamelCase_ = list(lowercase )
all_models.sort()
lowerCamelCase_ = {'model_type': all_models}
lowerCamelCase_ = [pt_models[t] for t in all_models]
lowerCamelCase_ = [tf_models[t] for t in all_models]
lowerCamelCase_ = [flax_models[t] for t in all_models]
    # Now use the auto-mapping names to pick the right preprocessing class for each model type.
lowerCamelCase_ = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoProcessor'
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
lowerCamelCase_ = 'AutoTokenizer'
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
lowerCamelCase_ = 'AutoFeatureExtractor'
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
lowerCamelCase_ = 'AutoTokenizer'
lowerCamelCase_ = [processors[t] for t in all_models]
return pd.DataFrame(lowercase )
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
lowerCamelCase_ = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
lowerCamelCase_ = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(lowercase , lowercase , lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(lowercase , lowercase ):
continue
# First extract all model_names
lowerCamelCase_ = []
for name in getattr(lowercase , lowercase ).values():
if isinstance(lowercase , lowercase ):
model_names.append(lowercase )
else:
model_names.extend(list(lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : Optional[int] ):
'''simple docstring'''
lowerCamelCase_ = get_frameworks_table()
lowerCamelCase_ = Dataset.from_pandas(lowercase )
lowerCamelCase_ = hf_hub_download(
'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=lowercase )
lowerCamelCase_ = Dataset.from_json(lowercase )
lowerCamelCase_ = {
tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
for i in range(len(lowercase ) )
}
lowerCamelCase_ = update_pipeline_and_auto_class_table(lowercase )
    # Sort the model classes so that nondeterministic ordering doesn't create spurious update commits.
lowerCamelCase_ = sorted(table.keys() )
lowerCamelCase_ = pd.DataFrame(
{
'model_class': model_classes,
'pipeline_tag': [table[m][0] for m in model_classes],
'auto_class': [table[m][1] for m in model_classes],
} )
lowerCamelCase_ = Dataset.from_pandas(lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(lowercase , 'frameworks.json' ) )
tags_dataset.to_json(os.path.join(lowercase , 'pipeline_tags.json' ) )
if commit_sha is not None:
lowerCamelCase_ = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
lowerCamelCase_ = 'Update'
upload_folder(
repo_id='huggingface/transformers-metadata' , folder_path=lowercase , repo_type='dataset' , token=lowercase , commit_message=lowercase , )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
lowerCamelCase_ = transformers_module.pipelines.SUPPORTED_TASKS
lowerCamelCase_ = []
for key in pipeline_tasks:
if key not in in_table:
lowerCamelCase_ = pipeline_tasks[key]['pt']
if isinstance(lowercase , (list, tuple) ):
lowerCamelCase_ = model[0]
lowerCamelCase_ = model.__name__
if model not in in_table.values():
missing.append(lowercase )
if len(lowercase ) > 0:
lowerCamelCase_ = ', '.join(lowercase )
raise ValueError(
'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
lowerCamelCase : Optional[int] = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 208 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __snake_case ( __lowerCAmelCase ):
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: List[str] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase , 'hidden_sizes'))
self.parent.assertTrue(hasattr(lowercase , 'num_attention_heads'))
self.parent.assertTrue(hasattr(lowercase , 'num_encoder_blocks'))
class __snake_case :
def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 1_28] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ) -> Any:
'''simple docstring'''
a__: Any = parent
a__: List[Any] = batch_size
a__: str = image_size
a__: int = num_channels
a__: List[Any] = num_encoder_blocks
a__: Any = sr_ratios
a__: Tuple = depths
a__: Dict = hidden_sizes
a__: Tuple = downsampling_rates
a__: Union[str, Any] = num_attention_heads
a__: str = is_training
a__: List[Any] = use_labels
a__: Dict = hidden_act
a__: Dict = hidden_dropout_prob
a__: Union[str, Any] = attention_probs_dropout_prob
a__: Optional[Any] = initializer_range
a__: Any = num_labels
a__: Optional[Any] = scope
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__: Optional[Any] = None
if self.use_labels:
a__: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a__: Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self) -> str:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__: List[Any] = SegformerModel(config=lowercase)
model.to(lowercase)
model.eval()
a__: Tuple = model(lowercase)
a__: str = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__: Optional[Any] = self.num_labels
a__: Union[str, Any] = SegformerForSemanticSegmentation(lowercase)
model.to(lowercase)
model.eval()
a__: int = model(lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
a__: int = model(lowercase , labels=lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__: Tuple = 1
a__: Optional[Any] = SegformerForSemanticSegmentation(config=lowercase)
model.to(lowercase)
model.eval()
a__: List[str] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(lowercase)
a__: Union[str, Any] = model(lowercase , labels=lowercase)
self.parent.assertGreater(result.loss , 0.0)
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: str = self.prepare_config_and_inputs()
a__ , a__ , a__: str = config_and_inputs
a__: Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
a__ = (
{
"""feature-extraction""": SegformerModel,
"""image-classification""": SegformerForImageClassification,
"""image-segmentation""": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a__ = True
a__ = False
a__ = False
a__ = False
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[int] = SegformerModelTester(self)
a__: Union[str, Any] = SegformerConfigTester(self , config_class=lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase)
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
a__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowercase)
@unittest.skip('SegFormer does not use inputs_embeds')
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
pass
def lowerCamelCase_ ( self) -> Optional[int]:
'''simple docstring'''
a__ , a__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: Union[str, Any] = model_class(lowercase)
a__: int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__: Union[str, Any] = [*signature.parameters.keys()]
a__: List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def lowerCamelCase_ ( self) -> List[str]:
'''simple docstring'''
a__ , a__: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a__: List[Any] = True
for model_class in self.all_model_classes:
a__: Tuple = True
a__: List[str] = False
a__: Optional[Any] = True
a__: Union[str, Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: Dict = model(**self._prepare_for_class(lowercase , lowercase))
a__: Optional[Any] = outputs.attentions
a__: Dict = sum(self.model_tester.depths)
self.assertEqual(len(lowercase) , lowercase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__: List[str] = True
a__: List[str] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: str = model(**self._prepare_for_class(lowercase , lowercase))
a__: List[str] = outputs.attentions
self.assertEqual(len(lowercase) , lowercase)
# verify the first attentions (first block, first layer)
a__: Optional[int] = (self.model_tester.image_size // 4) ** 2
a__: Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a__: List[Any] = (self.model_tester.image_size // 32) ** 2
a__: Dict = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a__: Tuple = len(lowercase)
# Check attention is always last and order is fine
a__: Union[str, Any] = True
a__: Any = True
a__: Optional[int] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: int = model(**self._prepare_for_class(lowercase , lowercase))
self.assertEqual(out_len + 1 , len(lowercase))
a__: Dict = outputs.attentions
self.assertEqual(len(lowercase) , lowercase)
# verify the first attentions (first block, first layer)
a__: Any = (self.model_tester.image_size // 4) ** 2
a__: Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__: List[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__: Optional[int] = model(**self._prepare_for_class(lowercase , lowercase))
a__: List[str] = outputs.hidden_states
a__: Optional[int] = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowercase) , lowercase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a__ , a__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__: List[str] = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__: List[Any] = True
check_hidden_states_output(lowercase , lowercase , lowercase)
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
a__ , a__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
a__: Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase):
continue
a__: Union[str, Any] = model_class(lowercase)
model.to(lowercase)
model.train()
a__: List[Any] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase)
a__: List[str] = model(**lowercase).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__: Optional[Any] = SegformerModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def __a ( ) ->Optional[Any]:
a__: Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self) -> Union[str, Any]:
'''simple docstring'''
a__: str = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__: List[str] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
lowercase)
a__: Optional[int] = prepare_img()
a__: int = image_processor(images=lowercase , return_tensors='pt')
a__: Dict = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__: List[Any] = model(lowercase)
a__: Tuple = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , lowercase)
a__: Optional[Any] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-4))
@slow
def lowerCamelCase_ ( self) -> Tuple:
'''simple docstring'''
a__: List[str] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__: int = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(lowercase)
a__: str = prepare_img()
a__: Optional[Any] = image_processor(images=lowercase , return_tensors='pt')
a__: Dict = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__: Optional[Any] = model(lowercase)
a__: Tuple = torch.Size((1, model.config.num_labels, 1_28, 1_28))
self.assertEqual(outputs.logits.shape , lowercase)
a__: Any = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-1))
@slow
def lowerCamelCase_ ( self) -> List[Any]:
'''simple docstring'''
a__: Optional[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__: Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
lowercase)
a__: Tuple = prepare_img()
a__: Optional[Any] = image_processor(images=lowercase , return_tensors='pt')
a__: Any = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__: Tuple = model(lowercase)
a__: Tuple = outputs.logits.detach().cpu()
a__: Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(5_00, 3_00)])
a__: str = torch.Size((5_00, 3_00))
self.assertEqual(segmentation[0].shape , lowercase)
a__: Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase)
a__: List[str] = torch.Size((1_28, 1_28))
self.assertEqual(segmentation[0].shape , lowercase)
| 290 | """simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowercase__ = logging.getLogger(__name__)
class __snake_case :
def __init__( self) -> Optional[int]:
'''simple docstring'''
a__: Optional[Any] = False
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
if not self.initialized:
a__: Optional[int] = RagRetriever(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Optional[int] = True
def lowerCamelCase_ ( self) -> int:
'''simple docstring'''
self.retriever.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ , a__: str = self.retriever._main_retrieve(lowercase , lowercase)
return doc_ids, retrieved_doc_embeds
class __snake_case ( __lowerCAmelCase ):
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase=None) -> int:
'''simple docstring'''
if index is not None and index.is_initialized() and len(lowercase) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
super().__init__(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , index=lowercase , init_retrieval=lowercase , )
a__: Any = retrieval_workers
if len(self.retrieval_workers) > 0:
ray.get(
[
worker.create_rag_retriever.remote(lowercase , lowercase , lowercase , lowercase)
for worker in self.retrieval_workers
])
def lowerCamelCase_ ( self) -> Any:
'''simple docstring'''
logger.info('initializing retrieval')
if len(self.retrieval_workers) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def lowerCamelCase_ ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
if len(self.retrieval_workers) > 0:
# Select a random retrieval actor.
a__: int = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers) - 1)]
a__ , a__: List[Any] = ray.get(random_worker.retrieve.remote(lowercase , lowercase))
else:
a__ , a__: Dict = self._main_retrieve(lowercase , lowercase)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
return super(lowercase , cls).get_tokenizers(lowercase , lowercase , **lowercase)
@classmethod
def lowerCamelCase_ ( cls , lowercase , lowercase , lowercase=None , **lowercase) -> Union[str, Any]:
'''simple docstring'''
a__: Optional[int] = kwargs.pop('config' , lowercase) or RagConfig.from_pretrained(lowercase , **lowercase)
a__: Union[str, Any] = RagTokenizer.from_pretrained(lowercase , config=lowercase)
a__: int = rag_tokenizer.question_encoder
a__: Any = rag_tokenizer.generator
if indexed_dataset is not None:
a__: List[Any] = 'custom'
a__: Optional[Any] = CustomHFIndex(config.retrieval_vector_size , lowercase)
else:
a__: Dict = cls._build_index(lowercase)
return cls(
lowercase , question_encoder_tokenizer=lowercase , generator_tokenizer=lowercase , retrieval_workers=lowercase , index=lowercase , )
| 290 | 1 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase__ )
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , **UpperCAmelCase__ : Dict) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self : str , UpperCAmelCase__ : Union[str, List[str], "Image", List["Image"]] , **UpperCAmelCase__ : Tuple) ->List[str]:
'''simple docstring'''
return super().__call__(UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : str , **UpperCAmelCase__ : int) ->Optional[Any]:
'''simple docstring'''
A__ = {}
if "candidate_labels" in kwargs:
A__ = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
A__ = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : int="This is a photo of {}.") ->Dict:
'''simple docstring'''
A__ = load_image(UpperCAmelCase__)
A__ = self.image_processor(images=[image] , return_tensors=self.framework)
A__ = candidate_labels
A__ = [hypothesis_template.format(UpperCAmelCase__) for x in candidate_labels]
A__ = self.tokenizer(UpperCAmelCase__ , return_tensors=self.framework , padding=UpperCAmelCase__)
A__ = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple) ->Dict:
'''simple docstring'''
A__ = model_inputs.pop('''candidate_labels''')
A__ = model_inputs.pop('''text_inputs''')
if isinstance(text_inputs[0] , UpperCAmelCase__):
A__ = text_inputs[0]
else:
# Batching case.
A__ = text_inputs[0][0]
A__ = self.model(**UpperCAmelCase__ , **UpperCAmelCase__)
A__ = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[Any]) ->str:
'''simple docstring'''
A__ = model_outputs.pop('''candidate_labels''')
A__ = model_outputs['''logits'''][0]
if self.framework == "pt":
A__ = logits.softmax(dim=-1).squeeze(-1)
A__ = probs.tolist()
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__):
A__ = [scores]
elif self.framework == "tf":
A__ = stable_softmax(UpperCAmelCase__ , axis=-1)
A__ = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""")
A__ = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(UpperCAmelCase__ , UpperCAmelCase__) , key=lambda UpperCAmelCase__: -x[0])
]
return result
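# Minimal usage sketch (editor's assumption; standard `transformers.pipeline` API, model name illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}]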
| 231 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model about 50 times smaller than this one, see `fsmt-make-super-tiny-model.py`,
# which is slightly more complicated.
#
#
# The result will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_lowerCamelCase : Optional[Any] = """facebook/wmt19-en-de"""
_lowerCamelCase : Optional[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_lowerCamelCase : int = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_lowerCamelCase : Union[str, Any] = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
_lowerCamelCase : int = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
_lowerCamelCase : int = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
_lowerCamelCase : str = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 231 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : int = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
A : Union[str, Any] = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(lowerCamelCase__ ), lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : int = np.random.randn(3, 4 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ), x.transpose() ) )
A : Tuple = np.random.randn(3, 4, 5 )
self.assertTrue(np.allclose(transpose(lowerCamelCase__, axes=(1, 2, 0) ), x.transpose((1, 2, 0) ) ) )
@require_torch
def _lowerCAmelCase ( self ):
A : List[str] = np.random.randn(3, 4 )
A : str = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ), transpose(lowerCamelCase__ ).numpy() ) )
A : Optional[int] = np.random.randn(3, 4, 5 )
A : Optional[Any] = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__, axes=(1, 2, 0) ), transpose(lowerCamelCase__, axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _lowerCAmelCase ( self ):
A : str = np.random.randn(3, 4 )
A : Optional[int] = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ), transpose(lowerCamelCase__ ).numpy() ) )
A : Optional[Any] = np.random.randn(3, 4, 5 )
A : List[Any] = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__, axes=(1, 2, 0) ), transpose(lowerCamelCase__, axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _lowerCAmelCase ( self ):
A : Tuple = np.random.randn(3, 4 )
A : Optional[int] = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__ ), np.asarray(transpose(lowerCamelCase__ ) ) ) )
A : Optional[int] = np.random.randn(3, 4, 5 )
A : Optional[Any] = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(transpose(lowerCamelCase__, axes=(1, 2, 0) ), np.asarray(transpose(lowerCamelCase__, axes=(1, 2, 0) ) ) ) )
def _lowerCAmelCase ( self ):
A : Optional[int] = np.random.randn(3, 4 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (4, 3) ), np.reshape(lowerCamelCase__, (4, 3) ) ) )
A : int = np.random.randn(3, 4, 5 )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (12, 5) ), np.reshape(lowerCamelCase__, (12, 5) ) ) )
@require_torch
def _lowerCAmelCase ( self ):
A : Optional[Any] = np.random.randn(3, 4 )
A : Tuple = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (4, 3) ), reshape(lowerCamelCase__, (4, 3) ).numpy() ) )
A : str = np.random.randn(3, 4, 5 )
A : Optional[Any] = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (12, 5) ), reshape(lowerCamelCase__, (12, 5) ).numpy() ) )
@require_tf
def _lowerCAmelCase ( self ):
A : int = np.random.randn(3, 4 )
A : str = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (4, 3) ), reshape(lowerCamelCase__, (4, 3) ).numpy() ) )
A : Tuple = np.random.randn(3, 4, 5 )
A : Any = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (12, 5) ), reshape(lowerCamelCase__, (12, 5) ).numpy() ) )
@require_flax
def _lowerCAmelCase ( self ):
A : Any = np.random.randn(3, 4 )
A : Optional[Any] = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (4, 3) ), np.asarray(reshape(lowerCamelCase__, (4, 3) ) ) ) )
A : Optional[int] = np.random.randn(3, 4, 5 )
A : Dict = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(reshape(lowerCamelCase__, (12, 5) ), np.asarray(reshape(lowerCamelCase__, (12, 5) ) ) ) )
def _lowerCAmelCase ( self ):
A : str = np.random.randn(1, 3, 4 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ), np.squeeze(lowerCamelCase__ ) ) )
A : str = np.random.randn(1, 4, 1, 5 )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__, axis=2 ), np.squeeze(lowerCamelCase__, axis=2 ) ) )
@require_torch
def _lowerCAmelCase ( self ):
A : List[Any] = np.random.randn(1, 3, 4 )
A : Tuple = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ), squeeze(lowerCamelCase__ ).numpy() ) )
A : Any = np.random.randn(1, 4, 1, 5 )
A : Dict = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__, axis=2 ), squeeze(lowerCamelCase__, axis=2 ).numpy() ) )
@require_tf
def _lowerCAmelCase ( self ):
A : str = np.random.randn(1, 3, 4 )
A : int = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ), squeeze(lowerCamelCase__ ).numpy() ) )
A : Dict = np.random.randn(1, 4, 1, 5 )
A : Dict = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__, axis=2 ), squeeze(lowerCamelCase__, axis=2 ).numpy() ) )
@require_flax
def _lowerCAmelCase ( self ):
A : List[str] = np.random.randn(1, 3, 4 )
A : List[Any] = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__ ), np.asarray(squeeze(lowerCamelCase__ ) ) ) )
A : Dict = np.random.randn(1, 4, 1, 5 )
A : List[Any] = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(squeeze(lowerCamelCase__, axis=2 ), np.asarray(squeeze(lowerCamelCase__, axis=2 ) ) ) )
def _lowerCAmelCase ( self ):
A : Any = np.random.randn(3, 4 )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__, axis=1 ), np.expand_dims(lowerCamelCase__, axis=1 ) ) )
@require_torch
def _lowerCAmelCase ( self ):
A : Optional[Any] = np.random.randn(3, 4 )
A : Optional[Any] = torch.tensor(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__, axis=1 ), expand_dims(lowerCamelCase__, axis=1 ).numpy() ) )
@require_tf
def _lowerCAmelCase ( self ):
A : Optional[Any] = np.random.randn(3, 4 )
A : Tuple = tf.constant(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__, axis=1 ), expand_dims(lowerCamelCase__, axis=1 ).numpy() ) )
@require_flax
def _lowerCAmelCase ( self ):
A : Tuple = np.random.randn(3, 4 )
A : int = jnp.array(lowerCamelCase__ )
self.assertTrue(np.allclose(expand_dims(lowerCamelCase__, axis=1 ), np.asarray(expand_dims(lowerCamelCase__, axis=1 ) ) ) )
| 116 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    # With empty subarrays allowed the best sum can never drop below 0; otherwise it
    # starts at -inf so that an all-negative array returns its largest element.
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Kadane's algorithm: extend the current subarray or restart it at `num`
        # (restart at 0, i.e. the empty subarray, when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
return max_sum
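# For illustration (these examples are not in the original file):
#   max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) -> 6       (subarray [4, -1, 2, 1])
#   max_subarray_sum([-3, -2]) -> -2                              (best single element)
#   max_subarray_sum([-3, -2], allow_empty_subarrays=True) -> 0   (the empty subarray wins)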
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"""{max_subarray_sum(nums) = }""")
| 58 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : List[str] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowercase : int = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_lowercase : str = {"facebook/blenderbot_small-90M": 5_1_2}
def snake_case__ ( __lowerCamelCase : str ):
"""simple docstring"""
lowerCamelCase__ : int =set()
lowerCamelCase__ : Optional[int] =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase__ : int =char
lowerCamelCase__ : int =set(__lowerCamelCase )
return pairs
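# For illustration (not part of the original module): `get_pairs` yields the adjacent
# symbol bigrams of a BPE word, e.g.
#   get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}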
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
'''simple docstring'''
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ['input_ids', 'attention_mask']
def __init__( self : List[Any], lowerCamelCase : Dict, lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any]="__start__", lowerCamelCase : List[str]="__end__", lowerCamelCase : Optional[Any]="__unk__", lowerCamelCase : Union[str, Any]="__null__", **lowerCamelCase : Tuple, )-> int:
super().__init__(unk_token=lowerCamelCase, bos_token=lowerCamelCase, eos_token=lowerCamelCase, pad_token=lowerCamelCase, **lowerCamelCase )
with open(lowerCamelCase, encoding='''utf-8''' ) as vocab_handle:
lowerCamelCase__ : Tuple =json.load(lowerCamelCase )
lowerCamelCase__ : Tuple ={v: k for k, v in self.encoder.items()}
with open(lowerCamelCase, encoding='''utf-8''' ) as merges_handle:
lowerCamelCase__ : int =merges_handle.read().split('''\n''' )[1:-1]
lowerCamelCase__ : Union[str, Any] =[tuple(merge.split() ) for merge in merges]
lowerCamelCase__ : Optional[Any] =dict(zip(lowerCamelCase, range(len(lowerCamelCase ) ) ) )
lowerCamelCase__ : Optional[int] ={}
@property
def snake_case ( self : List[Any] )-> int:
return len(self.encoder )
def snake_case ( self : Optional[int] )-> Dict:
return dict(self.encoder, **self.added_tokens_encoder )
def snake_case ( self : Any, lowerCamelCase : str )-> str:
if token in self.cache:
return self.cache[token]
lowerCamelCase__ : List[str] =re.sub('''([.,!?()])''', r''' \1''', lowerCamelCase )
lowerCamelCase__ : Union[str, Any] =re.sub('''(\')''', r''' \1 ''', lowerCamelCase )
lowerCamelCase__ : Any =re.sub(r'''\s{2,}''', ''' ''', lowerCamelCase )
if "\n" in token:
lowerCamelCase__ : str =token.replace('''\n''', ''' __newln__''' )
lowerCamelCase__ : List[str] =token.split(''' ''' )
lowerCamelCase__ : Any =[]
for token in tokens:
if not len(lowerCamelCase ):
continue
lowerCamelCase__ : Union[str, Any] =token.lower()
lowerCamelCase__ : Optional[int] =tuple(lowerCamelCase )
lowerCamelCase__ : List[str] =tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCamelCase__ : str =get_pairs(lowerCamelCase )
if not pairs:
words.append(lowerCamelCase )
continue
while True:
lowerCamelCase__ : Optional[int] =min(lowerCamelCase, key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase, float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase__ , lowerCamelCase__ : List[str] =bigram
lowerCamelCase__ : int =[]
lowerCamelCase__ : Optional[int] =0
while i < len(lowerCamelCase ):
try:
lowerCamelCase__ : Union[str, Any] =word.index(lowerCamelCase, lowerCamelCase )
new_word.extend(word[i:j] )
lowerCamelCase__ : List[str] =j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase__ : str =tuple(lowerCamelCase )
lowerCamelCase__ : Any =new_word
if len(lowerCamelCase ) == 1:
break
else:
lowerCamelCase__ : Optional[int] =get_pairs(lowerCamelCase )
lowerCamelCase__ : str ='''@@ '''.join(lowerCamelCase )
lowerCamelCase__ : str =word[:-4]
lowerCamelCase__ : Optional[Any] =word
words.append(lowerCamelCase )
return " ".join(lowerCamelCase )
def snake_case ( self : Any, lowerCamelCase : str )-> List[str]:
lowerCamelCase__ : Any =[]
lowerCamelCase__ : Any =re.findall(r'''\S+\n?''', lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase ).split(''' ''' ) ) )
return split_tokens
def snake_case ( self : List[str], lowerCamelCase : str )-> int:
lowerCamelCase__ : Tuple =token.lower()
return self.encoder.get(lowerCamelCase, self.encoder.get(self.unk_token ) )
def snake_case ( self : Tuple, lowerCamelCase : int )-> str:
return self.decoder.get(lowerCamelCase, self.unk_token )
def snake_case ( self : Optional[int], lowerCamelCase : List[str] )-> str:
lowerCamelCase__ : Tuple =''' '''.join(lowerCamelCase ).replace('''@@ ''', '''''' ).strip()
return out_string
def snake_case ( self : Tuple, lowerCamelCase : str, lowerCamelCase : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Optional[Any] =os.path.join(
lowerCamelCase, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
lowerCamelCase__ : str =0
with open(lowerCamelCase, '''w''', encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCamelCase__ : Optional[int] =token_index
                writer.write(" ".join(bpe_tokens) + "\n")
index += 1
return vocab_file, merge_file
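

# Added: a minimal, self-contained sketch of the greedy BPE merge loop that the
# `bpe` method above implements (toy merge ranks, not the real Blenderbot tables).
def _toy_bpe(word: str) -> str:
    ranks = {("h", "e"): 0, ("he", "l"): 1, ("l", "o"): 2}
    symbols = list(word)
    while len(symbols) > 1:
        # rank every adjacent pair; unknown pairs rank as infinity
        candidates = [
            (ranks.get(pair, float("inf")), i)
            for i, pair in enumerate(zip(symbols, symbols[1:]))
        ]
        rank, i = min(candidates)
        if rank == float("inf"):
            break  # no known merge left
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]
    return " ".join(symbols)  # _toy_bpe("hello") -> "hel lo"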
| 272 |
"""simple docstring"""
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1

        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0

        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't include this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)


if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
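    # Hand-checked note (added): the call above prints 10; one of the ten valid
    # assignments is person 0 -> task 1, person 1 -> task 2, person 2 -> task 3.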
| 272 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Recursively sorts the first n elements of the list in ascending order.
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Bubbles the element at index - 1 forward until it is in order.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
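

# Added non-interactive sanity check (kept commented so the stdin driver above
# is unaffected):
#   data = [5, 3, 1, 4, 2]
#   rec_insertion_sort(data, len(data))
#   assert data == [1, 2, 3, 4, 5]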
| 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
SCREAMING_SNAKE_CASE_: Tuple =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
f"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
f"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
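

# Added illustration (hypothetical tensors, not the real checkpoint): a fused
# in_proj matrix of shape (3*d, d) splits into equal query/key/value blocks,
# mirroring the 256-wide slices used above.
_d = 4
_w = torch.arange(3 * _d * _d, dtype=torch.float32).reshape(3 * _d, _d)
_q, _k, _v = _w[:_d, :], _w[_d : 2 * _d, :], _w[-_d:, :]
assert _q.shape == _k.shape == _v.shape == (_d, _d)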
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase_ ( snake_case_ : Optional[int] , snake_case_ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCAmelCase_ = "resnet101"
if "dc5" in model_name:
UpperCAmelCase_ = True
UpperCAmelCase_ = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase_ = 2_50
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(snake_case_ ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
# load image processor
UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase_ = ConditionalDetrImageProcessor(format=snake_case_ )
# prepare image
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=snake_case_ , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCAmelCase_ = torch.hub.load("DeppMeng/ConditionalDETR" , snake_case_ , pretrained=snake_case_ ).eval()
UpperCAmelCase_ = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCAmelCase_ = "conditional_detr." + src
rename_key(snake_case_ , snake_case_ , snake_case_ )
UpperCAmelCase_ = rename_backbone_keys(snake_case_ )
# query, key and value matrices need special treatment
read_in_q_k_v(snake_case_ , is_panoptic=snake_case_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase_ = state_dict.pop(snake_case_ )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = ConditionalDetrForSegmentation(snake_case_ ) if is_panoptic else ConditionalDetrForObjectDetection(snake_case_ )
model.load_state_dict(snake_case_ )
model.eval()
model.push_to_hub(repo_id=snake_case_ , organization="DepuMeng" , commit_message="Add model" )
# verify our conversion
UpperCAmelCase_ = conditional_detr(snake_case_ )
UpperCAmelCase_ = model(snake_case_ )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
image_processor.save_pretrained(snake_case_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_: List[str] =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
SCREAMING_SNAKE_CASE_: int =parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
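
# Added usage note (hypothetical invocation; the file name is whatever this
# script is saved as):
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 --pytorch_dump_folder_path ./converted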
| 1 | 1 |
import sys
from collections import defaultdict
class _snake_case :
def __init__( self : List[str] ):
lowercase__ = []
def A__ ( self : Optional[int], __lowercase : str ):
return self.node_position[vertex]
def A__ ( self : int, __lowercase : List[Any], __lowercase : Optional[int] ):
lowercase__ = pos
def A__ ( self : Tuple, __lowercase : Dict, __lowercase : List[str], __lowercase : int, __lowercase : Optional[int] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowercase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowercase__ = 2 * start + 1
else:
lowercase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowercase__ , lowercase__ = heap[smallest_child], positions[smallest_child]
lowercase__ , lowercase__ = (
heap[start],
positions[start],
)
lowercase__ , lowercase__ = temp, tempa
lowercase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], __lowercase )
self.top_to_bottom(__lowercase, __lowercase, __lowercase, __lowercase )
def A__ ( self : int, __lowercase : Tuple, __lowercase : List[str], __lowercase : Union[str, Any], __lowercase : Any ):
lowercase__ = position[index]
while index != 0:
lowercase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowercase__ = heap[parent]
lowercase__ = position[parent]
self.set_position(position[parent], __lowercase )
else:
lowercase__ = val
lowercase__ = temp
self.set_position(__lowercase, __lowercase )
break
lowercase__ = parent
else:
lowercase__ = val
lowercase__ = temp
self.set_position(__lowercase, 0 )
def A__ ( self : Dict, __lowercase : Any, __lowercase : Tuple ):
lowercase__ = len(__lowercase ) // 2 - 1
for i in range(__lowercase, -1, -1 ):
self.top_to_bottom(__lowercase, __lowercase, len(__lowercase ), __lowercase )
def A__ ( self : Any, __lowercase : str, __lowercase : int ):
lowercase__ = positions[0]
lowercase__ = sys.maxsize
self.top_to_bottom(__lowercase, 0, len(__lowercase ), __lowercase )
return temp
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowercase__ = Heap()
lowercase__ = [0] * len(SCREAMING_SNAKE_CASE_ )
lowercase__ = [-1] * len(SCREAMING_SNAKE_CASE_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowercase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowercase__ = []
for vertex in range(len(SCREAMING_SNAKE_CASE_ ) ):
distance_tv.append(sys.maxsize )
positions.append(SCREAMING_SNAKE_CASE_ )
heap.node_position.append(SCREAMING_SNAKE_CASE_ )
lowercase__ = []
lowercase__ = 1
lowercase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowercase__ = 0
lowercase__ = distance
heap.heapify(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for _ in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
lowercase__ = heap.delete_minimum(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowercase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(SCREAMING_SNAKE_CASE_ )]
):
lowercase__ = distance
heap.bottom_to_top(
SCREAMING_SNAKE_CASE_ , heap.get_position(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = vertex
return tree_edges
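

# Added, non-interactive usage sketch (kept commented so the stdin driver below
# still runs; assumes the intended, de-obfuscated behavior of the heap code above):
#   graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#       graph[u].append([v, w])
#       graph[v].append([u, w])
#   prisms_algorithm(graph)  # expected MST edges: [(0, 1), (1, 2)]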
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
lowercase_ = int(input("""Enter number of edges: """).strip())
lowercase_ = defaultdict(list)
for _ in range(edges_number):
lowercase_ = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 364 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 224 | 0 |
def combination_util(arr, n, r, index, data, i):
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
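

if __name__ == "__main__":
    # Added cross-check against the standard library: the recursive routine above
    # enumerates exactly the C(5, 3) = 10 combinations that itertools produces.
    from itertools import combinations

    assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10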
| 280 |
def merge_sort(collection: list) -> list:
    # Despite the name, this is a two-ended selection sort: each pass moves the
    # current minimum to the front and the current maximum to the back.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 280 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = IFPipeline
lowercase = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {'latents'}
def _lowercase( self ) -> int:
return self._get_dummy_components()
def _lowercase( self , A , A=0 ) -> Optional[Any]:
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[Any] = torch.manual_seed(A )
else:
UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> Any:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def _lowercase( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1e-1 )
def _lowercase( self ) -> Tuple:
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def _lowercase( self ) -> Any:
self._test_save_load_local()
def _lowercase( self ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowercase( self ) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase( self ) -> Optional[Any]:
# if
UpperCAmelCase : List[Any] = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[Any] = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A , tokenizer=A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
UpperCAmelCase , UpperCAmelCase : List[Any] = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
UpperCAmelCase : str = None
UpperCAmelCase : Dict = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
UpperCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
UpperCAmelCase : Any = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A , A , A , A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
UpperCAmelCase : List[str] = IFInpaintingPipeline(**pipe_a.components )
UpperCAmelCase : int = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A , A , A , A )
def _lowercase( self , A , A , A , A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Any = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
UpperCAmelCase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
UpperCAmelCase : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : List[Any] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def _lowercase( self , A , A , A , A ) -> Union[str, Any]:
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : List[Any] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
UpperCAmelCase : Any = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : Optional[Any] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def _lowercase( self , A , A , A , A ) -> Tuple:
# pipeline 1
_start_torch_memory_measurement()
UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(A )
UpperCAmelCase : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , num_inference_steps=2 , generator=A , output_type="""np""" , )
UpperCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (64, 64, 3)
UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
UpperCAmelCase : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A , A )
# pipeline 2
_start_torch_memory_measurement()
UpperCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(A )
UpperCAmelCase : str = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(A )
UpperCAmelCase : str = pipe_a(
prompt_embeds=A , negative_prompt_embeds=A , image=A , mask_image=A , original_image=A , generator=A , num_inference_steps=2 , output_type="""np""" , )
UpperCAmelCase : int = output.images[0]
assert image.shape == (256, 256, 3)
UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
UpperCAmelCase : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A , A )
def __lowerCamelCase ( ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : List[Any] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 338 | 1 |
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
a : Tuple = '''sshleifer/mar_enro_6_3_student'''
class __UpperCamelCase ( a__ ):
def __a ( self ) -> Optional[Any]:
super().setUp()
a : Any = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=lowerCAmelCase__ , )
a : List[Any] = f"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
def __a ( self ) -> List[Any]:
MarianMTModel.from_pretrained(lowerCAmelCase__ )
@slow
@require_torch_gpu
def __a ( self ) -> List[str]:
a : Optional[int] = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
a : str = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
a : int = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
a : int = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
a : Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
a : List[str] = f"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
a : Dict = ["finetune.py"] + bash_script.split() + args
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
a : List[Any] = argparse.ArgumentParser()
a : Dict = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
a : List[str] = SummarizationModule.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
a : str = parser.parse_args()
a : Union[str, Any] = main(lowerCAmelCase__ )
# Check metrics
a : List[str] = load_json(model.metrics_save_path )
a : Optional[int] = metrics["val"][0]
a : Dict = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCAmelCase__ )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
a : int = os.listdir(lowerCAmelCase__ )
a : Tuple = [x for x in contents if x.endswith(".ckpt" )][0]
a : Optional[Any] = os.path.join(args.output_dir , lowerCAmelCase__ )
a : Any = torch.load(lowerCAmelCase__ , map_location="cpu" )
a : Dict = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a : Dict = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class __UpperCamelCase ( a__ ):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def __a ( self ) -> Union[str, Any]:
a : int = f"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
a : Optional[Any] = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 128,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
a : Any = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
a : Union[str, Any] = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
a : Any = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
a : Dict = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
a : int = self.get_auto_remove_tmp_dir()
a : Union[str, Any] = bash_script.replace("--fp16" , "" )
a : Optional[int] = 6
a : str = (
["distillation.py"]
+ bash_script.split()
+ [
f"""--output_dir={output_dir}""",
"--gpus=1",
"--learning_rate=1e-3",
f"""--num_train_epochs={epochs}""",
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
a : int = argparse.ArgumentParser()
a : Optional[int] = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
a : Tuple = SummarizationDistiller.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
a : List[str] = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
a : Optional[int] = distill_main(lowerCAmelCase__ )
# Check metrics
a : Tuple = load_json(model.metrics_save_path )
a : Union[str, Any] = metrics["val"][0]
a : List[Any] = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[f"""val_avg_{model.val_metric}"""] , lowerCAmelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
a : List[str] = os.listdir(lowerCAmelCase__ )
a : Optional[Any] = [x for x in contents if x.endswith(".ckpt" )][0]
a : Optional[int] = os.path.join(args.output_dir , lowerCAmelCase__ )
a : Optional[Any] = torch.load(lowerCAmelCase__ , map_location="cpu" )
a : int = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
a : Optional[int] = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 105 |
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Return the lowest possible sum along a top-left to bottom-right path
    (moving only right or down), accumulating costs in place."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
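

# Added worked example: in [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left -> bottom-right path (moving only right or down) costs 1+3+1+1+1 = 7.
if __name__ == "__main__":
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7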
| 105 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def a ( SCREAMING_SNAKE_CASE_ : int=None ):
"""simple docstring"""
if subparsers is not None:
UpperCamelCase : int = subparsers.add_parser('''env''' )
else:
UpperCamelCase : List[str] = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' , default=SCREAMING_SNAKE_CASE_ , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def a ( SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
UpperCamelCase : Tuple = torch.__version__
UpperCamelCase : str = torch.cuda.is_available()
UpperCamelCase : Dict = is_xpu_available()
UpperCamelCase : int = is_npu_available()
UpperCamelCase : List[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase : List[str] = load_config_from_file(args.config_file ).to_dict()
UpperCamelCase : Union[str, Any] = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': F"""{pt_version} ({pt_cuda_available})""",
'''PyTorch XPU available''': str(SCREAMING_SNAKE_CASE_ ),
'''PyTorch NPU available''': str(SCREAMING_SNAKE_CASE_ ),
'''System RAM''': F"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""",
}
if pt_cuda_available:
UpperCamelCase : str = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([F"""- {prop}: {val}""" for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
UpperCamelCase : List[Any] = (
'''\n'''.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else F"""\t{accelerate_config}"""
)
print(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = accelerate_config
return info
def a ( ):
"""simple docstring"""
UpperCamelCase : Optional[int] = env_command_parser()
UpperCamelCase : Optional[int] = parser.parse_args()
env_command(SCREAMING_SNAKE_CASE_ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 315 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
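

# Note (added): because both inputs are L2-normalized first, the matmul above
# yields a matrix of cosine *similarities* between every image embedding and
# every concept embedding; "distance" is a misnomer kept from the original name.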
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[str] = CLIPConfig
__UpperCamelCase : Optional[int] = ["CLIPEncoderLayer"]
def __init__( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
super().__init__(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Any = CLIPVisionModel(config.vision_config )
UpperCamelCase : List[str] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(17 ) , requires_grad=__SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = nn.Parameter(torch.ones(3 ) , requires_grad=__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Tuple = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : Union[str, Any] = self.visual_projection(__SCREAMING_SNAKE_CASE )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
UpperCamelCase : Optional[int] = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds ).cpu().float().numpy()
UpperCamelCase : List[Any] = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds ).cpu().float().numpy()
UpperCamelCase : Dict = []
UpperCamelCase : List[str] = image_embeds.shape[0]
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[Any] = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}
            # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Optional[int] = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
UpperCamelCase : List[str] = special_cos_dist[i][concept_idx]
UpperCamelCase : Optional[Any] = self.special_care_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]} )
UpperCamelCase : Optional[int] = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
UpperCamelCase : Optional[int] = cos_dist[i][concept_idx]
UpperCamelCase : List[str] = self.concept_embeds_weights[concept_idx].item()
UpperCamelCase : Any = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(__SCREAMING_SNAKE_CASE )
result.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = [len(res['''bad_concepts'''] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase : Any = self.vision_model(__SCREAMING_SNAKE_CASE )[1] # pooled_output
UpperCamelCase : int = self.visual_projection(__SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cosine_distance(__SCREAMING_SNAKE_CASE , self.special_care_embeds )
UpperCamelCase : str = cosine_distance(__SCREAMING_SNAKE_CASE , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
UpperCamelCase : Union[str, Any] = 0.0
UpperCamelCase : Optional[Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
UpperCamelCase : Optional[Any] = torch.any(special_scores > 0 , dim=1 )
UpperCamelCase : int = special_care * 0.01
UpperCamelCase : Tuple = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
UpperCamelCase : Optional[int] = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
UpperCamelCase : List[str] = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
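

# Added shape note: for a batch of B images, special_cos_dist has shape (B, 3)
# and cos_dist has shape (B, 17); an image is flagged when any per-concept score
# (cosine - threshold + adjustment) is positive along the concept axis.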
| 315 | 1 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """simple docstring"""
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        """simple docstring"""
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        """simple docstring"""
        pass

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
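# Added sketch: the `ids_tensor` helper imported above is assumed to behave
# roughly like this (uniform random token ids in [0, vocab_size)); this is an
# approximation of the shared test utility, not its exact implementation.
def ids_tensor_sketch(shape, vocab_size):
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)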
| 222 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    dataset_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(dataset_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    '''simple docstring'''

    def test_make_duplicate_clusters(self):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """simple docstring"""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)

        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], False)
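# Added usage sketch mirroring the assertions above: "a " * 20 and "a " * 30
# are near-duplicates at a Jaccard threshold of 0.85.
if __name__ == "__main__":
    ds = get_dataset()
    clusters = make_duplicate_clusters(ds, 0.85)
    print(len(clusters[0]))  # expected: 2, as in test_make_duplicate_clusters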
| 61 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
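# Added usage note: with the _LazyModule registration above, the heavy
# torch-backed submodules are only imported on first attribute access, so a
# config-only workflow stays lightweight. Illustrative sketch (assumes
# `transformers` is installed):
#
#   from transformers import SqueezeBertConfig
#   config = SqueezeBertConfig()
#   print(config.model_type)  # "squeezebert"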
| 356 |
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
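# Added sanity check: the loop above is the Fibonacci recurrence for the number
# of ways to climb n steps taking 1 or 2 at a time.
assert [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]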
| 104 | 0 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : Dict="<s>" , __UpperCamelCase : Optional[Any]="</s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : List[str]="m2m100" , __UpperCamelCase : Tuple = None , __UpperCamelCase : Optional[int]=8 , **__UpperCamelCase : Dict , )->None:
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = language_codes
_UpperCAmelCase = FAIRSEQ_LANGUAGE_CODES[language_codes]
_UpperCAmelCase = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code}
_UpperCAmelCase = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__UpperCAmelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__UpperCAmelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , language_codes=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__UpperCAmelCase , **__UpperCAmelCase , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = load_json(__UpperCAmelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = spm_file
_UpperCAmelCase = load_spm(__UpperCAmelCase , self.sp_model_kwargs )
_UpperCAmelCase = len(self.encoder )
_UpperCAmelCase = {
self.get_lang_token(__UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )
}
_UpperCAmelCase = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__UpperCAmelCase )}
_UpperCAmelCase = {v: k for k, v in self.lang_token_to_id.items()}
_UpperCAmelCase = src_lang if src_lang is not None else '''en'''
_UpperCAmelCase = tgt_lang
_UpperCAmelCase = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_UpperCAmelCase = num_madeup_words
@property
def lowercase__ ( self : Any )->int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowercase__ ( self : Union[str, Any] )->str:
return self._src_lang
@src_lang.setter
def lowercase__ ( self : int , __UpperCamelCase : Optional[int] )->None:
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Tuple )->List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Union[str, Any] )->str:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase__ ( self : str , __UpperCamelCase : Dict )->str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def lowercase__ ( self : Optional[Any] , __UpperCamelCase : Dict )->Union[str, Any]:
_UpperCAmelCase = []
_UpperCAmelCase = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
_UpperCAmelCase = []
else:
current_sub_tokens.append(__UpperCAmelCase )
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int = None , __UpperCamelCase : Any = False )->List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
_UpperCAmelCase = [1] * len(self.prefix_tokens )
_UpperCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
def lowercase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] = None )->List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : int )->Dict:
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Any )->Dict:
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Optional[int] , __UpperCamelCase : List[str] )->None:
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCAmelCase = {}
_UpperCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Dict = None )->Tuple[str]:
_UpperCAmelCase = Path(__UpperCAmelCase )
if not save_dir.is_dir():
raise OSError(F'{save_directory} should be a directory' )
_UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_UpperCAmelCase = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , __UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (str(__UpperCAmelCase ), str(__UpperCAmelCase ))
def lowercase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple = "en" , __UpperCamelCase : int = None , __UpperCamelCase : str = "ro" , **__UpperCamelCase : List[Any] , )->BatchEncoding:
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : Tuple , **__UpperCamelCase : Any )->List[Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , **__UpperCAmelCase )
_UpperCAmelCase = self.get_lang_id(__UpperCAmelCase )
_UpperCAmelCase = tgt_lang_id
return inputs
def lowercase__ ( self : Union[str, Any] )->Any:
self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : List[Any] )->str:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : List[Any] , __UpperCamelCase : Tuple )->None:
_UpperCAmelCase = self.get_lang_token(__UpperCAmelCase )
_UpperCAmelCase = self.lang_token_to_id[lang_token]
_UpperCAmelCase = [self.cur_lang_id]
_UpperCAmelCase = [self.eos_token_id]
def lowercase__ ( self : int , __UpperCamelCase : Optional[Any] )->None:
_UpperCAmelCase = self.get_lang_token(__UpperCAmelCase )
_UpperCAmelCase = self.lang_token_to_id[lang_token]
_UpperCAmelCase = [self.cur_lang_id]
_UpperCAmelCase = [self.eos_token_id]
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] )->str:
return self.lang_code_to_token[lang]
def lowercase__ ( self : List[Any] , __UpperCamelCase : Any )->int:
_UpperCAmelCase = self.get_lang_token(__UpperCAmelCase )
return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    '''simple docstring'''
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    '''simple docstring'''
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    '''simple docstring'''
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
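# Added usage sketch for the tokenizer above (illustrative; downloads the real
# checkpoint on first run):
#
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Hello world", return_tensors="pt")
#   forced_bos = tok.get_lang_id("fr")  # id used to force the target language during generation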
| 260 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    """simple docstring"""
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
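# Worked example (added): continuation markers "@@" are stripped and word-final
# tokens gain "</w>", while the four special tokens are restored unchanged:
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5})
#   == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "er</w>": 5}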
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
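    # Example invocation (paths are placeholders; the script filename is an
    # assumption based on the usual transformers layout):
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path /path/to/fairseq_biogpt \
    #       --pytorch_dump_folder_path /path/to/hf_biogpt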
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 243 | 0 |
"""simple docstring"""
def check_bouncy(num: int) -> bool:
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError('check_bouncy() accepts only integer arguments')
    str_n = str(num)
    sorted_str_n = ''.join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """simple docstring"""
    if not 0 < percent < 100:
        raise ValueError('solution() only accepts values from 0 to 100')
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
    print(f'{solution(99)}')
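# Added sanity checks: the digits of 100 are monotone when read in one
# direction (not bouncy), while 101 is the smallest bouncy number.
assert not check_bouncy(100)
assert check_bouncy(101)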
| 289 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """simple docstring"""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
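# Worked example (added): 360 = 2**3 * 3**2 * 5.
assert prime_factors(360) == [2, 2, 2, 3, 3, 5]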
| 289 | 1 |