| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__magic_name__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__UpperCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : List[Any] = ZeroShotClassificationPipeline(
model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , candidate_labels=['politics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
__snake_case : int = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase )]} )
# No kwarg
__snake_case : int = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase )]} )
__snake_case : Optional[int] = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase )]} )
__snake_case : List[str] = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__snake_case : int = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
__snake_case : str = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(_UpperCAmelCase , {'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
__snake_case : str = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]}
for i in range(1 )
] , )
__snake_case : Any = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
_UpperCAmelCase , [
{'sequence': ANY(_UpperCAmelCase ), 'labels': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )], 'scores': [ANY(_UpperCAmelCase ), ANY(_UpperCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(_UpperCAmelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(_UpperCAmelCase ):
classifier(_UpperCAmelCase , candidate_labels='politics' )
with self.assertRaises(_UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(_UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(_UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=_UpperCAmelCase , )
self.run_entailment_id(_UpperCAmelCase )
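# The entailment-id check invoked above (and defined in the next method)
# verifies how the pipeline resolves which output index means "entailment":
# generic names such as LABEL_0/LABEL_1 fall back to the default of -1
# (the last logit), while any label whose lowercased name starts with
# "entail" is mapped to its own index.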
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = zero_shot_classifier.model.config
__snake_case : Any = config.labelaid
__snake_case : Optional[int] = zero_shot_classifier.entailment_id
__snake_case : Tuple = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
__snake_case : List[str] = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : List[str] = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
__snake_case : Union[str, Any] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
__snake_case : str = original_labelaid
self.assertEqual(_UpperCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def lowercase_ ( self ):
__snake_case : Optional[Any] = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
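# (The prompt is repeated 100 times so the input far exceeds the model's
# maximum sequence length; the pipeline is expected to truncate it rather
# than crash, which is the behaviour that regressed in 4.10.)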
@require_torch
def lowercase_ ( self ):
__snake_case : int = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
__snake_case : Dict = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@require_tf
def lowercase_ ( self ):
__snake_case : Optional[Any] = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
__snake_case : Any = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def lowercase_ ( self ):
__snake_case : Optional[int] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
__snake_case : Dict = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
__snake_case : int = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_UpperCAmelCase , )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def lowercase_ ( self ):
__snake_case : List[Any] = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
__snake_case : List[Any] = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.976, 0.015, 0.009],
} , )
__snake_case : Union[str, Any] = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=_UpperCAmelCase , )
self.assertEqual(
nested_simplify(_UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.817, 0.713, 0.018, 0.018],
} , )
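# For context, a minimal sketch of how the pipeline exercised above is used
# outside the test suite; the model id is the one from the slow tests above,
# and the call shape mirrors the assertions:
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# result["labels"] is sorted by descending score; unless multi_label=True is
# passed, result["scores"] sums to ~1.0 across the candidate labels.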
| 576 |
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__magic_name__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
__UpperCAmelCase = field(
default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
__UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Set this flag to use fast tokenization."})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__UpperCAmelCase = field(
metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."})
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , )
__UpperCAmelCase = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
__UpperCAmelCase = field(
default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
def UpperCAmelCase__( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__snake_case , __snake_case , __snake_case : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
__snake_case : Dict = import_module('tasks' )
try:
__snake_case : Tuple = getattr(__UpperCAmelCase , model_args.task_type )
__snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __UpperCAmelCase )
# Set seed
set_seed(training_args.seed )
# Prepare the CoNLL-2003 task
__snake_case : Optional[int] = token_classification_task.get_labels(data_args.labels )
__snake_case : Dict[int, str] = dict(enumerate(__UpperCAmelCase ) )
__snake_case : List[Any] = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__snake_case : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid={label: i for i, label in enumerate(__UpperCAmelCase )} , cache_dir=model_args.cache_dir , )
__snake_case : Optional[int] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
__snake_case : Any = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
# Get datasets
__snake_case : List[Any] = (
TokenClassificationDataset(
token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
__snake_case : Dict = (
TokenClassificationDataset(
token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__UpperCAmelCase : np.ndarray , __UpperCAmelCase : np.ndarray ) -> Tuple[List[int], List[int]]:
__snake_case : Optional[Any] = np.argmax(__UpperCAmelCase , axis=2 )
__snake_case , __snake_case : Optional[Any] = preds.shape
__snake_case : Dict = [[] for _ in range(__UpperCAmelCase )]
__snake_case : Optional[Any] = [[] for _ in range(__UpperCAmelCase )]
for i in range(__UpperCAmelCase ):
for j in range(__UpperCAmelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
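# Example: with label_map = {0: "O", 1: "B-PER"} and a label_ids row of
# [-100, 0, 1], the -100 position (CrossEntropyLoss().ignore_index) is
# dropped, yielding out_label_list = [["O", "B-PER"]] aligned with preds_list.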
def compute_metrics(__UpperCAmelCase : EvalPrediction ) -> Dict:
__snake_case , __snake_case : Union[str, Any] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(__UpperCAmelCase , __UpperCAmelCase ),
"precision": precision_score(__UpperCAmelCase , __UpperCAmelCase ),
"recall": recall_score(__UpperCAmelCase , __UpperCAmelCase ),
"f1": f1_score(__UpperCAmelCase , __UpperCAmelCase ),
}
# Data collator
__snake_case : List[str] = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
__snake_case : Dict = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__snake_case : int = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__snake_case : str = trainer.evaluate()
__snake_case : Optional[int] = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , __UpperCAmelCase , __UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
results.update(__UpperCAmelCase )
# Predict
if training_args.do_predict:
__snake_case : Optional[int] = TokenClassificationDataset(
token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
__snake_case , __snake_case , __snake_case : Optional[int] = trainer.predict(__UpperCAmelCase )
__snake_case , __snake_case : Optional[int] = align_predictions(__UpperCAmelCase , __UpperCAmelCase )
__snake_case : List[Any] = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , __UpperCAmelCase , __UpperCAmelCase )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
__snake_case : Tuple = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return results
def UpperCAmelCase__( __UpperCAmelCase : Dict ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
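# Illustrative invocation of this fine-tuning script; the script name and
# argument values are assumptions, not taken from this file:
#   python run_ner.py --model_name_or_path bert-base-cased --data_dir ./conll2003 \
#       --task_type NER --output_dir ./out --do_train --do_eval --max_seq_length 128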
| 576 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
lowerCAmelCase__ = logging.get_logger(__name__)
@dataclass
class lowercase :
def __init__( self , _snake_case=False , _snake_case=False , _snake_case=6.0 , _snake_case=None , _snake_case=False , _snake_case=False , _snake_case=None , _snake_case="fp4" , _snake_case=False , **_snake_case , ) -> Union[str, Any]:
# NOTE: the anonymization collapsed the distinct 8-bit / 4-bit flags into one
# name; they are kept separate here to preserve the original two-mode logic.
UpperCAmelCase_ : List[str] = load_in_8bit
UpperCAmelCase_ : Any = load_in_4bit
UpperCAmelCase_ : str = llm_inta_threshold
UpperCAmelCase_ : str = llm_inta_skip_modules
UpperCAmelCase_ : Any = llm_inta_enable_fpaa_cpu_offload
UpperCAmelCase_ : Tuple = llm_inta_has_fpaa_weight
UpperCAmelCase_ : Any = bnb_abit_quant_type
UpperCAmelCase_ : int = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
UpperCAmelCase_ : List[Any] = torch.floataa
elif isinstance(_snake_case , _snake_case):
UpperCAmelCase_ : Optional[int] = getattr(_snake_case , _snake_case)
elif isinstance(_snake_case , torch.dtype):
UpperCAmelCase_ : Any = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')
self.post_init()
def _snake_case ( self) -> Union[str, Any]:
if not isinstance(self.llm_inta_threshold , _snake_case):
raise ValueError('llm_int8_threshold must be a float')
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , _snake_case):
raise ValueError('llm_int8_skip_modules must be a list of strings')
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , _snake_case):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
if not isinstance(self.llm_inta_has_fpaa_weight , _snake_case):
raise ValueError('llm_int8_has_fp16_weight must be a boolean')
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
if not isinstance(self.bnb_abit_quant_type , _snake_case):
raise ValueError('bnb_4bit_quant_type must be a string')
if not isinstance(self.bnb_abit_use_double_quant , _snake_case):
raise ValueError('bnb_4bit_use_double_quant must be a boolean')
if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
'0.39.0'):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')
def _snake_case ( self) -> Optional[int]:
return self.load_in_8bit or self.load_in_4bit
def _snake_case ( self) -> str:
if self.load_in_8bit:
return "llm_int8"
elif self.load_in_4bit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_4bit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def _snake_case ( cls , _snake_case , _snake_case , **_snake_case) -> str:
UpperCAmelCase_ : str = cls(**_snake_case)
UpperCAmelCase_ : List[Any] = []
for key, value in kwargs.items():
if hasattr(_snake_case , _snake_case):
setattr(_snake_case , _snake_case , _snake_case)
to_remove.append(_snake_case)
for key in to_remove:
kwargs.pop(_snake_case , _snake_case)
if return_unused_kwargs:
return config, kwargs
else:
return config
def _snake_case ( self , _snake_case) -> Dict:
with open(_snake_case , 'w' , encoding='utf-8') as writer:
UpperCAmelCase_ : Any = self.to_dict()
UpperCAmelCase_ : Optional[int] = json.dumps(_snake_case , indent=2 , sort_keys=_snake_case) + '\n'
writer.write(_snake_case)
def _snake_case ( self) -> Dict[str, Any]:
UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self.__dict__)
UpperCAmelCase_ : Optional[Any] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
return output
def __repr__( self) -> Optional[Any]:
return F"""{self.__class__.__name__} {self.to_json_string()}"""
def _snake_case ( self , _snake_case = True) -> str:
if use_diff is True:
UpperCAmelCase_ : Optional[int] = self.to_diff_dict()
else:
UpperCAmelCase_ : List[str] = self.to_dict()
return json.dumps(_snake_case , indent=2 , sort_keys=_snake_case) + "\n"
def _snake_case ( self) -> Dict[str, Any]:
UpperCAmelCase_ : List[str] = self.to_dict()
# get the default config dict
UpperCAmelCase_ : List[Any] = BitsAndBytesConfig().to_dict()
UpperCAmelCase_ : List[str] = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
UpperCAmelCase_ : Any = value
return serializable_config_dict
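# Sketch of how a config like this is consumed in practice; load_in_4bit and
# bnb_4bit_compute_dtype are the un-anonymized names of the fields above, and
# the model id is illustrative:
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)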
| 471 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class lowercase ( a_ ):
pass
class lowercase :
def __init__( self , _snake_case) -> None:
UpperCAmelCase_ : Any = data
UpperCAmelCase_ : Node | None = None
def __iter__( self) -> Optional[int]:
UpperCAmelCase_ : int = self
UpperCAmelCase_ : List[str] = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(_snake_case)
yield node.data
UpperCAmelCase_ : Tuple = node.next_node
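# Note: collecting visited nodes in a Python list makes this loop check
# O(n^2) in the worst case; a set of node ids (or Floyd's tortoise-and-hare)
# would be the usual O(n) choice.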
@property
def _snake_case ( self) -> bool:
try:
list(self)
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowerCAmelCase__ = Node(1)
lowerCAmelCase__ = Node(2)
lowerCAmelCase__ = Node(3)
lowerCAmelCase__ = Node(4)
print(root_node.has_loop) # False
lowerCAmelCase__ = root_node.next_node
print(root_node.has_loop) # True
lowerCAmelCase__ = Node(5)
lowerCAmelCase__ = Node(6)
lowerCAmelCase__ = Node(5)
lowerCAmelCase__ = Node(6)
print(root_node.has_loop) # False
lowerCAmelCase__ = Node(1)
print(root_node.has_loop) # False
| 471 | 1 |
import argparse
import copy
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Tuple ={}
with open(lowerCamelCase ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__magic_name__ : Optional[Any] =[]
_list.append([line.split()[1], line.split()[2]] )
__magic_name__ : Dict =_list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__magic_name__ : List[Any] =[]
_list.append([line.split()[0], line.split()[2]] )
__magic_name__ : Tuple =_list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
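# dict_of_neighbours maps each node to a list of [neighbour, distance] pairs,
# e.g. {"a": [["b", "20"], ["c", "18"]], ...} for an input file whose lines
# look like "a b 20" (distances are kept as strings here and cast with int()
# when used).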
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
with open(lowerCamelCase ) as f:
__magic_name__ : Optional[int] =f.read(1 )
__magic_name__ : Any =start_node
__magic_name__ : Union[str, Any] =[]
__magic_name__ : Dict =start_node
__magic_name__ : List[Any] =0
while visiting not in first_solution:
__magic_name__ : Dict =10000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(lowerCamelCase ) and k[0] not in first_solution:
__magic_name__ : Tuple =k[1]
__magic_name__ : str =k[0]
first_solution.append(lowerCamelCase )
__magic_name__ : Optional[Any] =distance_of_first_solution + int(lowerCamelCase )
__magic_name__ : Tuple =best_node
first_solution.append(lowerCamelCase )
__magic_name__ : Optional[Any] =0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__magic_name__ : Union[str, Any] =(
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10000
)
return first_solution, distance_of_first_solution
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : Union[str, Any] =[]
for n in solution[1:-1]:
__magic_name__ : Union[str, Any] =solution.index(lowerCamelCase )
for kn in solution[1:-1]:
__magic_name__ : Union[str, Any] =solution.index(lowerCamelCase )
if n == kn:
continue
__magic_name__ : str =copy.deepcopy(lowerCamelCase )
__magic_name__ : List[str] =kn
__magic_name__ : List[str] =n
__magic_name__ : List[Any] =0
for k in _tmp[:-1]:
__magic_name__ : Optional[int] =_tmp[_tmp.index(lowerCamelCase ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__magic_name__ : int =distance + int(i[1] )
_tmp.append(lowerCamelCase )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__magic_name__ : List[str] =len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda lowerCamelCase : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[str] =1
__magic_name__ : List[str] =first_solution
__magic_name__ : int =[]
__magic_name__ : Dict =distance_of_first_solution
__magic_name__ : Union[str, Any] =solution
while count <= iters:
__magic_name__ : Dict =find_neighborhood(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =0
__magic_name__ : Any =neighborhood[index_of_best_solution]
__magic_name__ : Optional[Any] =len(lowerCamelCase ) - 1
__magic_name__ : List[str] =False
while not found:
__magic_name__ : Any =0
while i < len(lowerCamelCase ):
if best_solution[i] != solution[i]:
__magic_name__ : Any =best_solution[i]
__magic_name__ : Optional[Any] =solution[i]
break
__magic_name__ : int =i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__magic_name__ : Optional[int] =True
__magic_name__ : List[str] =best_solution[:-1]
__magic_name__ : Optional[Any] =neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__magic_name__ : List[Any] =cost
__magic_name__ : List[Any] =solution
else:
__magic_name__ : Optional[Any] =index_of_best_solution + 1
__magic_name__ : List[str] =neighborhood[index_of_best_solution]
if len(lowerCamelCase ) >= size:
tabu_list.pop(0 )
__magic_name__ : Optional[int] =count + 1
return best_solution_ever, best_cost
def lowerCAmelCase_ ( lowerCamelCase=None ):
__magic_name__ : int =generate_neighbours(args.File )
__magic_name__ , __magic_name__ : str =generate_first_solution(
args.File , lowerCamelCase )
__magic_name__ , __magic_name__ : List[Any] =tabu_search(
lowerCamelCase , lowerCamelCase , lowerCamelCase , args.Iterations , args.Size , )
print(F"Best solution: {best_sol}, with total distance: {best_cost}." )
if __name__ == "__main__":
UpperCAmelCase_ : List[str] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
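# Illustrative run (assumes an input file with one "node_a node_b distance"
# triple per line, as parsed by the neighbour-generation function above):
#   python tabu_search.py -f cities.txt -i 100 -s 5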
| 21 |
'''simple docstring'''
_UpperCAmelCase : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def snake_case__ ( UpperCamelCase ) -> int:
_UpperCamelCase : Any = 0
while number:
# Slightly increases speed by processing five digits at a time.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
number //= 10_00_00
return sum_of_digits_squared
# Two chains are formed:
# one ends in 89 (declaring chain member 58 first minimizes the number of
# iterations needed to check all the members), and the other ends in 1 and
# contains only the single element 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed the dictionary to an array to speed up the solution.
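# Worked example of the two chains (repeatedly summing squared digits):
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89  (enters the 89 loop)
#   44 -> 32 -> 13 -> 10 -> 1  (reaches 1 and stays there)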
_UpperCAmelCase : list[bool | None] = [None] * 10000000
_UpperCAmelCase : str = True
_UpperCAmelCase : Tuple = False
def snake_case__ ( UpperCamelCase ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCamelCase : List[str] = chain(next_number(UpperCamelCase ) )
_UpperCamelCase : Tuple = number_chain
while number < 10_00_00_00:
_UpperCamelCase : int = number_chain
number *= 10
return number_chain
def snake_case__ ( UpperCamelCase = 10_00_00_00 ) -> int:
for i in range(1 ,UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
| 683 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
UpperCamelCase__ = logging.get_logger(__name__)
class __lowercase ( UpperCamelCase__ ):
def __init__( self : Any , *lowercase__ : Union[str, Any] , **lowercase__ : Optional[int] ):
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _a , )
super().__init__(*_a , **_a )
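# This is the standard deprecation-shim pattern: the old feature-extractor
# class keeps working but emits a warning steering users to GLPNImageProcessor.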
| 710 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCamelCase__ = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCamelCase__ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __lowercase :
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class __lowercase :
def __init__( self : int , lowercase__ : Iterable[int] ):
a_ = None
for i in sorted(lowercase__ , reverse=lowercase__ ):
a_ = Node(lowercase__ , self.head )
def __iter__( self : str ):
a_ = self.head
while node:
yield node.data
a_ = node.next_node
def __len__( self : Optional[int] ):
return sum(1 for _ in self )
def __str__( self : Optional[Any] ):
return " -> ".join([str(lowercase__ ) for node in self] )
def UpperCAmelCase__ ( _A , _A ):
"""simple docstring"""
return SortedLinkedList(list(_A ) + list(_A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
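# Expected output for the module-level test data above (the constructor sorts
# in reverse and inserts at the head, so iteration is ascending):
#   -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10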
| 143 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = BlipImageProcessor()
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[int] ,**lowerCamelCase : Any ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).tokenizer
def UpperCAmelCase__ ( self : Dict ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : Optional[int] ,**lowerCamelCase : Dict ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).qformer_tokenizer
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = qformer_tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor["""qformer_""" + key] )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
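# For reference, a minimal sketch of using this processor outside the tests;
# the checkpoint id and image path are illustrative:
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.open("photo.jpg")
inputs = processor(images=image, text="Describe the image", return_tensors="pt")
# inputs carries input_ids/attention_mask (main tokenizer),
# qformer_input_ids/qformer_attention_mask (Q-Former tokenizer), and pixel_values.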
| 109 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class __a ( _snake_case ):
__UpperCamelCase : str = 'esm'
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : str=None ,lowerCamelCase : Any=None ,lowerCamelCase : Union[str, Any]=768 ,lowerCamelCase : Tuple=12 ,lowerCamelCase : int=12 ,lowerCamelCase : Optional[int]=3072 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : Any=1026 ,lowerCamelCase : str=0.02 ,lowerCamelCase : int=1E-1_2 ,lowerCamelCase : Union[str, Any]="absolute" ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : str=None ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : int=False ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=None ,**lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,mask_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = emb_layer_norm_before
__SCREAMING_SNAKE_CASE = token_dropout
__SCREAMING_SNAKE_CASE = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__SCREAMING_SNAKE_CASE = EsmFoldConfig()
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = EsmFoldConfig(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__SCREAMING_SNAKE_CASE = get_default_vocab_list()
else:
__SCREAMING_SNAKE_CASE = vocab_list
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.esmfold_config is not None and getattr(self.esmfold_config ,"""use_esm_attn_map""" ,lowerCamelCase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().to_dict()
if isinstance(self.esmfold_config ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = self.esmfold_config.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : str = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : float = 0
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : int = 128
__UpperCamelCase : "TrunkConfig" = None
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
if self.trunk is None:
__SCREAMING_SNAKE_CASE = TrunkConfig()
elif isinstance(self.trunk ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.trunk.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 48
__UpperCamelCase : int = 1024
__UpperCamelCase : int = 128
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : int = 32
__UpperCamelCase : float = 0
__UpperCamelCase : float = 0
__UpperCamelCase : bool = False
__UpperCamelCase : int = 4
__UpperCamelCase : Optional[int] = 128
__UpperCamelCase : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.structure_module is None:
__SCREAMING_SNAKE_CASE = StructureModuleConfig()
elif isinstance(self.structure_module ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
__SCREAMING_SNAKE_CASE = self.sequence_state_dim // self.sequence_head_width
__SCREAMING_SNAKE_CASE = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = asdict(self )
__SCREAMING_SNAKE_CASE = self.structure_module.to_dict()
return output
@dataclass
class __a :
__UpperCamelCase : int = 384
__UpperCamelCase : int = 128
__UpperCamelCase : int = 16
__UpperCamelCase : int = 128
__UpperCamelCase : int = 12
__UpperCamelCase : int = 4
__UpperCamelCase : int = 8
__UpperCamelCase : float = 0.1
__UpperCamelCase : int = 8
__UpperCamelCase : int = 1
__UpperCamelCase : int = 2
__UpperCamelCase : int = 7
__UpperCamelCase : int = 10
__UpperCamelCase : float = 1E-8
__UpperCamelCase : float = 1E5
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return asdict(self )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
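# The default list above has 33 symbols: 4 special tokens, the 20 standard
# amino acids, 5 ambiguity/non-standard codes (X, B, U, Z, O), the "." and "-"
# gap characters, plus <null_1> and <mask>, matching the ESM-2 vocabulary.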
| 109 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __A (UpperCamelCase_ ):
snake_case :int = ["image_processor", "tokenizer"]
snake_case :Optional[Any] = "BridgeTowerImageProcessor"
snake_case :Union[str, Any] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , UpperCamelCase_ , UpperCamelCase_ ):
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ):
__UpperCAmelCase : int = self.tokenizer(
text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , )
# add pixel_values + pixel_mask
__UpperCAmelCase : int = self.image_processor(
UpperCamelCase_ , return_tensors=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , **UpperCamelCase_ )
encoding.update(UpperCamelCase_ )
return encoding
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def _snake_case ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
@property
def _snake_case ( self ):
__UpperCAmelCase : Tuple = self.tokenizer.model_input_names
__UpperCAmelCase : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
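# For reference, model_input_names above is the deduplicated union of the
# tokenizer's and the image processor's input names, so a single processor
# call can feed BridgeTower both text and image tensors.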
| 718 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def _lowercase ( lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : Dict = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
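# Helper: returns True only when every tensor in tensor_list has the same
# shape as the first one, used when comparing per-scheduler outputs.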
class __A (__magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
snake_case :Union[str, Any] = StableDiffusionLatentUpscalePipeline
snake_case :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
"height",
"width",
"cross_attention_kwargs",
"negative_prompt_embeds",
"prompt_embeds",
}
snake_case :List[str] = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
snake_case :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
snake_case :Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
snake_case :Any = frozenset([] )
snake_case :Optional[int] = True
@property
def _snake_case ( self ):
__UpperCAmelCase : Optional[int] = 1
__UpperCAmelCase : Dict = 4
__UpperCAmelCase : List[str] = (16, 16)
__UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase_ )
return image
    def get_dummy_components(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_equivalence(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # schedulers without a sigma schedule are not supported by this pipeline
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
        image = upscaler(
            prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2
    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")
        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )
        image = upscaler(
            prompt=prompt, image=image, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
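# A reproduction sketch (mine, not part of the test file): the two-stage flow the
# slow tests above exercise, with model ids taken from the tests themselves.
# base = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
# upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16).to("cuda")
# latents = base(prompt, output_type="latent").images
# upscaled = upscaler(prompt=prompt, image=latents, num_inference_steps=20, guidance_scale=0).images[0]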
| 10 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class ExampleDifferenceTests(unittest.TestCase):
'''simple docstring'''
    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        '''simple docstring'''
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join('examples', 'by_feature'))
        examples_path = os.path.abspath('examples')
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name, feature_script=item, tested_section='main()' if parser_only else 'training_function()', ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename)
                        diff = '\n'.join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, '')
                        self.assertEqual(diff, '')
    def test_nlp_examples(self):
        '''simple docstring'''
        self.one_complete_example('complete_nlp_example.py', True)
        self.one_complete_example('complete_nlp_example.py', False)
    def test_cv_examples(self):
        '''simple docstring'''
        cv_path = os.path.abspath(os.path.join('examples', 'cv_example.py'))
        special_strings = [
' ' * 16 + '{\n\n',
' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
' ' * 20 + '"f1": eval_metric["f1"],\n\n',
' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
' ' * 20 + '"epoch": epoch,\n\n',
' ' * 16 + '},\n\n',
' ' * 16 + 'step=epoch,\n',
' ' * 12,
' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
]
        self.one_complete_example('complete_cv_example.py', True, cv_path, special_strings)
        self.one_complete_example('complete_cv_example.py', False, cv_path, special_strings)
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class FeatureExamplesTests(TempDirTestCase):
    '''simple docstring'''
    clean_on_exit = False
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, 'default_config.yml')
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
    def tearDownClass(cls):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
    def test_checkpointing_by_epoch(self):
        '''simple docstring'''
        testargs = F"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps epoch\n            --output_dir {self.tmpdir}\n            ".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'epoch_0' ) ) )
    def test_checkpointing_by_steps(self):
        '''simple docstring'''
        testargs = F"\n            examples/by_feature/checkpointing.py\n            --checkpointing_steps 1\n            --output_dir {self.tmpdir}\n            ".split()
        _ = run_command(self._launch_args + testargs)
self.assertTrue(os.path.exists(os.path.join(self.tmpdir, 'step_2' ) ) )
    def test_load_states_by_epoch(self):
        '''simple docstring'''
        testargs = F"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'epoch_0')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn('epoch 0:', output)
        self.assertIn('epoch 1:', output)
    def test_load_states_by_steps(self):
        '''simple docstring'''
        testargs = F"\n            examples/by_feature/checkpointing.py\n            --resume_from_checkpoint {os.path.join(self.tmpdir, 'step_2')}\n            ".split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
        else:
            self.assertIn('epoch 0:', output)
            self.assertIn('epoch 1:', output)
@slow
    def test_cross_validation(self):
        '''simple docstring'''
        testargs = '\n            examples/by_feature/cross_validation.py\n            --num_folds 2\n            '.split()
        with mock.patch.dict(os.environ, {'TESTING_MOCKED_DATALOADERS': '0'}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall('({.+})', output)
            results = [r for r in results if 'accuracy' in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results['accuracy'], 0.75)
    def test_multi_process_metrics(self):
        '''simple docstring'''
        testargs = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ, {'WANDB_MODE': 'offline'} )
    def test_tracking(self):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = F"\n                examples/by_feature/tracking.py\n                --with_tracking\n                --project_dir {tmpdir}\n                ".split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'tracking')))
    def test_gradient_accumulation(self):
        '''simple docstring'''
        testargs = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
    def test_local_sgd(self):
        '''simple docstring'''
        testargs = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
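    # For reference (my note, not part of the test file): each run_command above
    # expands to a launcher call of the form below, with the config file being the
    # per-class temp file written in setUpClass.
    #   accelerate launch --config_file <tmpdir>/default_config.yml examples/by_feature/local_sgd.py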
| 28 | def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
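# Worked examples (mine, matching the functions above):
# >>> binary_recursive(7)
# '111'
# >>> main("-11")
# '-0b1011'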
if __name__ == "__main__":
from doctest import testmod
testmod()
| 382 | 0 |
def hexagonal_numbers(length: int) -> list[int]:
    '''simple docstring'''
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
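# Expected output (mine): [0, 1, 6, 15, 28] for length=5; for length=10 the
# sequence continues 45, 66, 91, 120, 153, since the n-th term is n * (2n - 1).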
| 140 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    '''simple docstring'''
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    '''simple docstring'''
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    '''simple docstring'''
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:  # error case
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix | None:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    '''simple docstring'''
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    '''simple docstring'''
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
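# A minimal usage sketch (mine), exercising the classes above:
# v = Vector([1, 2, 3])
# w = Vector([3, 2, 1])
# print(v + w)                                # (4,4,4)
# print(v * w)                                # 10 (dot product)
# print(square_zero_matrix(3).determinant())  # 0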
| 140 | 1 |
def print_pascal_triangle(num_rows: int) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int, ) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 59 |
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        '''simple docstring'''
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""", """prajjwal1/bert-tiny""")
        tokenizer = BertTokenizer.from_pretrained("""bert-base-uncased""")
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        train_dataset = datasets.load_dataset("""cnn_dailymail""", """3.0.0""", split="""train[:1%]""")
        val_dataset = datasets.load_dataset("""cnn_dailymail""", """3.0.0""", split="""validation[:1%]""")
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["""article"""], padding="""max_length""", truncation=True, max_length=512)
            outputs = tokenizer(batch["""highlights"""], padding="""max_length""", truncation=True, max_length=128)
            batch["""input_ids"""] = inputs.input_ids
            batch["""attention_mask"""] = inputs.attention_mask
            batch["""decoder_input_ids"""] = outputs.input_ids
            batch["""labels"""] = outputs.input_ids.copy()
            batch["""labels"""] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""]
            ]
            batch["""decoder_attention_mask"""] = outputs.attention_mask
            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)
            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)
            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["""article""", """highlights"""], )
        train_dataset.set_format(
            type="""torch""", columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""], )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["""article""", """highlights"""], )
        val_dataset.set_format(
            type="""torch""", columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""], )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="""steps""", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )
        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )
        # start training
        trainer.train() | 156 | 0 |
def multiplicative_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError('multiplicative_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('multiplicative_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps
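# Worked example (mine): multiplicative_persistence(39) == 3,
# since 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4.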
def additive_persistence(num: int) -> int:
    """simple docstring"""
    if not isinstance(num, int):
        raise ValueError('additive_persistence() only accepts integral values')
    if num < 0:
        raise ValueError('additive_persistence() does not accept negative values')
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
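# Worked example (mine): additive_persistence(199) == 3,
# since 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1.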
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def dep_version_check(pkg, hint=None):
    """simple docstring"""
    require_version(deps[pkg], hint)
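# Usage sketch (hypothetical call site, mine): verify a runtime pin for a single
# package, assuming it appears in the deps table above.
# dep_version_check("tqdm")  # raises if the installed tqdm violates the pinned range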
| 443 | 0 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    '''simple docstring'''
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    '''simple docstring'''
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        if self.num_attention_heads is not None:
            raise ValueError(
                '''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''')
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
            down_blocks.append(down_block)
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=block_out_channels[-1], dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        only_cross_attention = list(reversed(only_cross_attention))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            is_final_block = i == len(block_out_channels) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, num_attention_heads=reversed_num_attention_heads[i], add_upsample=not is_final_block, dropout=self.dropout, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype, )
            else:
                up_block = FlaxUpBlock2D(
                    in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, num_layers=self.layers_per_block + 1, add_upsample=not is_final_block, dropout=self.dropout, dtype=self.dtype, )
            up_blocks.append(up_block)
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv_out = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, sample, timesteps, encoder_hidden_states, down_block_additional_residuals=None, mid_block_additional_residual=None, return_dict: bool = True, train: bool = False, ):
        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)
        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples
        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block, FlaxCrossAttnUpBlock2D):
                sample = up_block(
                    sample, temb=t_emb, encoder_hidden_states=encoder_hidden_states, res_hidden_states_tuple=res_samples, deterministic=not train, )
            else:
                sample = up_block(sample, temb=t_emb, res_hidden_states_tuple=res_samples, deterministic=not train)
        # 6. post-process
        sample = self.conv_norm_out(sample)
        sample = nn.silu(sample)
        sample = self.conv_out(sample)
        sample = jnp.transpose(sample, (0, 3, 1, 2))
        if not return_dict:
            return (sample,)
        return FlaxUNet2DConditionOutput(sample=sample)
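# A minimal shape sketch (mine, under assumed defaults): initialize the module and
# run one forward pass with dummy inputs matching the default config above.
# unet = FlaxUNet2DConditionModel(sample_size=32)
# params = unet.init_weights(jax.random.PRNGKey(0))
# sample = jnp.zeros((1, 4, 32, 32))
# timesteps = jnp.ones((1,), dtype=jnp.int32)
# context = jnp.zeros((1, 1, 1280))
# out = unet.apply({"params": params}, sample, timesteps, context).sample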
| 33 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class TimesformerConfig(PretrainedConfig):
    model_type = """timesformer"""

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
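# A usage sketch (assumed, mirroring other Hugging Face configs):
# config = TimesformerConfig(num_frames=16)
# config.attention_type  # "divided_space_time"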
| 572 | 0 |
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            '''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    default_expected_features = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute('''SELECT * FROM dataset''')
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / '''cache'''
    output_sqlite_path = os.path.join(cache_dir, '''tmp.sql''')
    dataset = SqlDatasetReader('''dataset''', '''sqlite:///''' + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, '''dataset''', '''sqlite:///''' + output_sqlite_path, num_proc=0).write()
| 528 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    """simple docstring"""
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]], ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(i): i for i in self.__need()}
    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(F"""Process {process_number + 1} is executing.""")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(x) for x in available_resources]))
                    break
            if safe:
                print('''The process is in a safe state.\n''')
            else:
                print('''System in unsafe state. Aborting...\n''')
                break
    def __pretty_data(self):
        print(''' ''' * 9 + '''Allocated Resource Table''')
        for item in self.__allocated_resources_table:
            print(
                F"""P{self.__allocated_resources_table.index(item) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item)
                + '''\n''')
        print(''' ''' * 9 + '''System Resource Table''')
        for item in self.__maximum_claim_table:
            print(
                F"""P{self.__maximum_claim_table.index(item) + 1}"""
                + ''' '''.join(F"""{it:>8}""" for it in item)
                + '''\n''')
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(x) for x in self.__claim_vector))
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(x) for x in self.__available_resources()))
        time.sleep(1)
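# A minimal driver sketch (mine): wire the module-level test tables above into the
# class and run the safety check with verbose output.
# BankersAlgorithm(
#     test_claim_vector, test_allocated_res_table, test_maximum_claim_table
# ).main(describe=True)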
if __name__ == "__main__":
import doctest
doctest.testmod()
| 528 | 1 |
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        """simple docstring"""
        self.head = None

    def print_list(self):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """simple docstring"""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next
            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next
            if node_1 is None or node_2 is None:
                return
            node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
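    # Expected output (mine): "1 2 3 4 5" before the swap and "4 2 3 1 5" after,
    # since swap_nodes(1, 4) exchanges the payloads of the nodes holding 1 and 4.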
| 77 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(F"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(F"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(F"=> removing {file_name}")
logger.info(F"=> removing {file_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
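# Example invocation (hypothetical output path, mine):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --dump_path ./fast_tokenizers --tokenizer_name BertTokenizer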
| 679 | 0 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """simple docstring"""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """simple docstring"""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    """simple docstring"""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """simple docstring"""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
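# Usage sketches (mine), based on the helpers above:
# extract_path_from_uri("s3://my-bucket/data")     # -> "my-bucket/data"
# is_remote_filesystem(fsspec.filesystem("file"))  # -> False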
| 720 | '''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no already-colored neighbour of the current vertex has this color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` and every vertex after it."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring as a list of vertex colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
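

# Quick usage sketch (illustrative, not part of the original module):
if __name__ == "__main__":
    # A triangle (three mutually adjacent vertices) is 3-colorable but not 2-colorable.
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    assert color(triangle, 2) == []
    assert color(triangle, 3) == [0, 1, 2]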
| 10 | 0 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Collect all unique reduced fraction triples for n in {1, 2, -1, -2} and sum them."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 95 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and reduce the result to lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Collect all unique reduced fraction triples for n in {1, 2, -1, -2} and sum them."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
| 95 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
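

# Hypothetical quick check (added for illustration, not part of the original module):
if __name__ == "__main__":
    cfg = PegasusConfig()
    # the attribute_map/properties alias generic config names onto Pegasus-specific ones
    assert cfg.num_attention_heads == cfg.encoder_attention_heads == 16
    assert cfg.hidden_size == cfg.d_model == 1024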
| 49 | def min_path_sum(grid: list) -> int:
    """Return the lowest possible sum along a top-left to bottom-right path."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]

    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Add, in place, the cheapest reachable predecessor to every cell of the row."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
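    # Illustrative quick check (not in the original file): the cheapest path in
    # [[1, 3, 1], [1, 5, 1], [4, 2, 1]] is 1 -> 3 -> 1 -> 1 -> 1, which sums to 7.
    assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7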
| 49 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
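    # Minimal usage sketch (illustrative, not part of the original module):
    stack = LinkedStack[int]()
    for value in (1, 2, 3):
        stack.push(value)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3 and stack.peek() == 2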
| 276 |
def bead_sort(sequence: list) -> list:
    """Gravity/bead sort: repeatedly let larger "beads" fall past smaller ones."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 276 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False
    def setUp(self) -> None:
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
    @cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        return ByTaTokenizer.from_pretrained("google/byt5-small")
    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def UpperCAmelCase_ ( self : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=20 , UpperCamelCase__ : Dict=5 ) -> Tuple[str, list]:
'''simple docstring'''
__UpperCamelCase =[]
for i in range(len(UpperCamelCase__ ) ):
try:
__UpperCamelCase =tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__UpperCamelCase =list(filter(lambda UpperCamelCase__ : re.match(R'''^[ a-zA-Z]+$''' , t[1] ) , UpperCamelCase__ ) )
__UpperCamelCase =list(filter(lambda UpperCamelCase__ : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase__ ) , UpperCamelCase__ ) )
if max_length is not None and len(UpperCamelCase__ ) > max_length:
__UpperCamelCase =toks[:max_length]
if min_length is not None and len(UpperCamelCase__ ) < min_length and len(UpperCamelCase__ ) > 0:
while len(UpperCamelCase__ ) < min_length:
__UpperCamelCase =toks + toks
# toks_str = [t[1] for t in toks]
__UpperCamelCase =[t[0] for t in toks]
# Ensure consistency
__UpperCamelCase =tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
if " " not in output_txt and len(UpperCamelCase__ ) > 1:
__UpperCamelCase =(
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase__ )
+ ''' '''
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase__ )
)
if with_prefix_space:
__UpperCamelCase =''' ''' + output_txt
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
return output_txt, output_ids
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase =tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
__UpperCamelCase =tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase ='''Unicode €.'''
__UpperCamelCase =tokenizer(UpperCamelCase__ )
__UpperCamelCase =[88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
__UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''Unicode €.</s>''' )
__UpperCamelCase =tokenizer('''e è é ê ë''' )
__UpperCamelCase =[104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['''input_ids'''] , UpperCamelCase__ )
# decoding
__UpperCamelCase =tokenizer.decode(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__UpperCamelCase =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__UpperCamelCase =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
if FRAMEWORK != "jax":
__UpperCamelCase =list(batch.input_ids.numpy()[0] )
else:
__UpperCamelCase =list(batch.input_ids.tolist()[0] )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__UpperCamelCase =tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , UpperCamelCase__ )
self.assertIn('''attention_mask''' , UpperCamelCase__ )
self.assertNotIn('''decoder_input_ids''' , UpperCamelCase__ )
self.assertNotIn('''decoder_attention_mask''' , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase =[
'''Summary of the text.''',
'''Another summary.''',
]
__UpperCamelCase =tokenizer(
text_target=UpperCamelCase__ , max_length=32 , padding='''max_length''' , truncation=UpperCamelCase__ , return_tensors=UpperCamelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase =self.ta_base_tokenizer
__UpperCamelCase =['''A long paragraph for summarization. </s>''']
__UpperCamelCase =['''Summary of the text. </s>''']
# fmt: off
__UpperCamelCase =[68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__UpperCamelCase =[86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__UpperCamelCase =tokenizer(UpperCamelCase__ , text_target=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , batch['''input_ids'''][0] )
self.assertEqual(UpperCamelCase__ , batch['''labels'''][0] )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase =tempfile.mkdtemp()
__UpperCamelCase =''' He is very happy, UNwant\u00E9d,running'''
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__UpperCamelCase =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
__UpperCamelCase =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
shutil.rmtree(UpperCamelCase__ )
__UpperCamelCase =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase =tempfile.mkdtemp()
__UpperCamelCase =''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__UpperCamelCase =tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__UpperCamelCase =tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
tokenizer.save_pretrained(UpperCamelCase__ )
__UpperCamelCase =tokenizer.__class__.from_pretrained(UpperCamelCase__ )
__UpperCamelCase =after_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__UpperCamelCase =tokenizer.__class__.from_pretrained(UpperCamelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(UpperCamelCase__ )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__UpperCamelCase =json.load(UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__UpperCamelCase =json.load(UpperCamelCase__ )
__UpperCamelCase =[f"""<extra_id_{i}>""" for i in range(125 )]
__UpperCamelCase =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__UpperCamelCase =added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(UpperCamelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
with open(os.path.join(UpperCamelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCamelCase =tokenizer_class.from_pretrained(
UpperCamelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCamelCase =added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=UpperCamelCase__ )]
__UpperCamelCase =tokenizer_class.from_pretrained(
UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
__UpperCamelCase =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(UpperCamelCase__ )
__UpperCamelCase =tokenizer_class.from_pretrained(UpperCamelCase__ )
self.assertTrue(tokenizer.decode([255] ) == '''''' )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizers(fast=UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase =['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
__UpperCamelCase =tokenizer.convert_tokens_to_string(UpperCamelCase__ )
self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
'''simple docstring'''
__UpperCamelCase =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
__UpperCamelCase =[
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__UpperCamelCase =0
__UpperCamelCase =tokenizer.convert_ids_to_tokens(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
for attr in attributes_list:
setattr(UpperCamelCase__ , attr + '''_id''' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '''_id''' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , attr + '''_id''' , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(getattr(UpperCamelCase__ , attr + '''_id''' ) , UpperCamelCase__ )
setattr(UpperCamelCase__ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens_ids''' ) , [] )
setattr(UpperCamelCase__ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens''' ) , [token_to_test_setters] )
self.assertListEqual(getattr(UpperCamelCase__ , '''additional_special_tokens_ids''' ) , [token_id_to_test_setters] )
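

# Added note (illustrative, not in the original test file): ByT5 ids are raw UTF-8
# bytes shifted by 3 special tokens (pad=0, eos=1, unk=2), e.g. ord("U") + 3 == 88,
# matching the expected encoding of "Unicode €." asserted in the tests above.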
| 296 | """simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that a device_map covers every attention block exactly once."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Evenly split `n_layers` layer indices across the given devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
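

# Illustrative only (not in the original module): splitting 6 layers over 2 devices
#   get_device_map(6, [0, 1]) -> {0: [0, 1, 2], 1: [3, 4, 5]}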
| 296 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
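

# Illustrative note (not in the original file): with the defaults above,
# SwinConfig().hidden_size == int(96 * 2 ** 3) == 768 and
# SwinConfig().stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"].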
| 284 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_SCREAMING_SNAKE_CASE , beta_end=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=_SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_SCREAMING_SNAKE_CASE , prediction_type=_SCREAMING_SNAKE_CASE , sample_max_value=_SCREAMING_SNAKE_CASE , )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9_606 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0_296 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_SCREAMING_SNAKE_CASE ):
if i == len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = prev_t.item()
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [1_00, 87, 50, 1, 0]
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
with self.assertRaises(_SCREAMING_SNAKE_CASE , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_SCREAMING_SNAKE_CASE , timesteps=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _SCREAMING_SNAKE_CASE , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_SCREAMING_SNAKE_CASE )
| 284 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
def __init__(self : List[str] , a__ : Optional[Any] , a__ : str=7 , a__ : int=3 , a__ : Union[str, Any]=18 , a__ : Any=30 , a__ : Tuple=400 , a__ : Dict=True , a__ : Dict=None , a__ : List[str]=True , ):
"""simple docstring"""
__snake_case = size if size is not None else {'''height''': 18, '''width''': 18}
__snake_case = parent
__snake_case = batch_size
__snake_case = num_channels
__snake_case = image_size
__snake_case = min_resolution
__snake_case = max_resolution
__snake_case = do_resize
__snake_case = size
__snake_case = do_normalize
def a (self : Optional[int] ):
"""simple docstring"""
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = ImageGPTImageProcessingTester(self )
@property
def a (self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , '''clusters''' ) )
self.assertTrue(hasattr(a__ , '''do_resize''' ) )
self.assertTrue(hasattr(a__ , '''size''' ) )
self.assertTrue(hasattr(a__ , '''do_normalize''' ) )
def a (self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
__snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def a (self : str ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
__snake_case = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , obj[key] ) )
else:
self.assertEqual(obj[key] , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case = os.path.join(a__ , '''image_processor.json''' )
image_processor_first.to_json_file(a__ )
__snake_case = self.image_processing_class.from_json_file(a__ ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
def a (self : int ):
"""simple docstring"""
__snake_case = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(a__ )
__snake_case = self.image_processing_class.from_pretrained(a__ ).to_dict()
__snake_case = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(a__ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , a__ )
@unittest.skip('''ImageGPT requires clusters at initialization''' )
def a (self : Optional[Any] ):
"""simple docstring"""
pass
def lowerCamelCase__ ( ) -> List[str]:
__snake_case = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__snake_case = Image.open(dataset[4]['''file'''] )
__snake_case = Image.open(dataset[5]['''file'''] )
__snake_case = [imagea, imagea]
return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
@slow
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''' )
__snake_case = prepare_images()
# test non-batched
__snake_case = image_processing(images[0] , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
__snake_case = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , a__ )
# test batched
__snake_case = image_processing(a__ , return_tensors='''pt''' )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
__snake_case = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , a__ )
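

# Added note (illustrative, not in the original test file): ImageGPTImageProcessor
# maps every pixel to the index of its nearest color cluster, so `input_ids` holds
# one id per pixel (32 * 32 = 1024 positions for the checkpoint tested above).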
| 388 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class TFEfficientFormerModelTester:
def __init__(self : List[str] , a__ : Union[str, Any] , a__ : int = 13 , a__ : int = 64 , a__ : int = 2 , a__ : int = 3 , a__ : int = 3 , a__ : bool = True , a__ : bool = True , a__ : int = 128 , a__ : Optional[Any]=[16, 32, 64, 128] , a__ : int = 7 , a__ : int = 4 , a__ : int = 37 , a__ : str = "gelu" , a__ : float = 0.1 , a__ : float = 0.1 , a__ : int = 10 , a__ : float = 0.0_2 , a__ : int = 2 , a__ : int = 1 , a__ : int = 128 , a__ : List[int] = [2, 2, 2, 2] , a__ : int = 2 , a__ : int = 2 , ):
"""simple docstring"""
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = encoder_stride
__snake_case = num_attention_outputs
__snake_case = embed_dim
__snake_case = embed_dim + 1
__snake_case = resolution
__snake_case = depths
__snake_case = hidden_sizes
__snake_case = dim
__snake_case = mlp_expansion_ratio
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def a (self : int ):
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def a (self : Optional[int] , a__ : List[str] , a__ : Tuple , a__ : int ):
"""simple docstring"""
__snake_case = TFEfficientFormerModel(config=a__ )
__snake_case = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a (self : Optional[Any] , a__ : Any , a__ : Tuple , a__ : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.type_sequence_label_size
__snake_case = TFEfficientFormerForImageClassification(a__ )
__snake_case = model(a__ , labels=a__ , training=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__snake_case = 1
__snake_case = TFEfficientFormerForImageClassification(a__ )
__snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__snake_case = model(a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class TFEfficientFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
def a (self : Tuple ):
"""simple docstring"""
__snake_case = TFEfficientFormerModelTester(self )
__snake_case = ConfigTester(
self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def a (self : Union[str, Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def a (self : int ):
"""simple docstring"""
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def a (self : Optional[int] ):
"""simple docstring"""
pass
def a (self : List[str] ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(a__ )
__snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
def check_hidden_states_output(a__ : Optional[Any] , a__ : Optional[int] , a__ : Optional[Any] ):
__snake_case = model_class(a__ )
__snake_case = model(**self._prepare_for_class(a__ , a__ ) , training=a__ )
__snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__snake_case = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(a__ ) , a__ )
if hasattr(self.model_tester , '''encoder_seq_length''' ):
__snake_case = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
__snake_case = seq_length * self.model_tester.chunk_length
else:
__snake_case = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__snake_case = outputs.decoder_hidden_states
                self.assertIsInstance(a__ , (list, tuple) )
self.assertEqual(len(a__ ) , a__ )
__snake_case = getattr(self.model_tester , '''seq_length''' , a__ )
__snake_case = getattr(self.model_tester , '''decoder_seq_length''' , a__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case = True
check_hidden_states_output(a__ , a__ , a__ )
def a (self : Any , a__ : Dict , a__ : Any , a__ : int=False ):
"""simple docstring"""
__snake_case = super()._prepare_for_class(a__ , a__ , return_labels=a__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def a (self : Dict ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a__ )
def a (self : int ):
"""simple docstring"""
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = TFEfficientFormerModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def a (self : str ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = True
__snake_case = getattr(self.model_tester , '''seq_length''' , a__ )
__snake_case = getattr(self.model_tester , '''encoder_seq_length''' , a__ )
__snake_case = getattr(self.model_tester , '''key_length''' , a__ )
__snake_case = getattr(self.model_tester , '''chunk_length''' , a__ )
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
__snake_case = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__snake_case = True
__snake_case = False
__snake_case = True
__snake_case = model_class(a__ )
__snake_case = model(**self._prepare_for_class(a__ , a__ ) , training=a__ )
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case = True
__snake_case = model_class(a__ )
__snake_case = model(**self._prepare_for_class(a__ , a__ ) , training=a__ )
__snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(a__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def a (self : Dict ):
"""simple docstring"""
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__snake_case = model_class(a__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__snake_case = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=a__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__snake_case = model(a__ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase__ ( ) -> int:
__snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def a (self : List[str] ):
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def a (self : Optional[Any] ):
"""simple docstring"""
__snake_case = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''tf''' )
# forward pass
__snake_case = model(**a__ , training=a__ )
# verify the logits
__snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
@slow
def a (self : Dict ):
"""simple docstring"""
__snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
__snake_case = self.default_image_processor
__snake_case = prepare_img()
__snake_case = image_processor(images=a__ , return_tensors='''tf''' )
# forward pass
__snake_case = model(**a__ , training=a__ )
# verify the logits
__snake_case = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , a__ )
__snake_case = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , a__ , atol=1E-4 ) )
| 388 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
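
# Illustrative note (added comment, not in the original file): the mapping built above
# has submodule names as keys and exported symbols as values; the _LazyModule below
# resolves each name on first attribute access.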
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
A : str = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 516 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Tuple = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = 'bloom'
A = ['past_key_values']
A = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self :Tuple , lowerCamelCase_ :str=2_5_0_8_8_0 , lowerCamelCase_ :Optional[int]=6_4 , lowerCamelCase_ :Union[str, Any]=2 , lowerCamelCase_ :Optional[int]=8 , lowerCamelCase_ :Any=1e-5 , lowerCamelCase_ :int=0.02 , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Optional[int]=1 , lowerCamelCase_ :Any=2 , lowerCamelCase_ :Optional[Any]=False , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :List[str]=0.0 , lowerCamelCase_ :Tuple=1 , lowerCamelCase_ :Optional[int]=False , **lowerCamelCase_ :Dict , ) -> str:
"""simple docstring"""
UpperCamelCase__ = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase__ = kwargs.pop("n_embed" , lowerCamelCase_ )
UpperCamelCase__ = hidden_size if n_embed is None else n_embed
UpperCamelCase__ = n_layer
UpperCamelCase__ = n_head
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = initializer_range
UpperCamelCase__ = use_cache
UpperCamelCase__ = pretraining_tp
UpperCamelCase__ = apply_residual_connection_post_layernorm
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = bos_token_id
UpperCamelCase__ = eos_token_id
UpperCamelCase__ = slow_but_exact
super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
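# Hedged usage sketch, assuming the upstream class name `BloomConfig` (the name in
# this file is obfuscated). It shows the backward-compatibility rule coded above:
# a legacy `n_embed` kwarg, when given, overrides `hidden_size`.
#
#   from transformers import BloomConfig
#   cfg = BloomConfig(hidden_size=64, n_embed=128)
#   assert cfg.hidden_size == 128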
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
A = version.parse('1.12' )
def __init__( self :Tuple , lowerCamelCase_ :PretrainedConfig , lowerCamelCase_ :str = "default" , lowerCamelCase_ :List[PatchingSpec] = None , lowerCamelCase_ :bool = False , ) -> Dict:
"""simple docstring"""
super().__init__(lowerCamelCase_ , task=lowerCamelCase_ , patching_specs=lowerCamelCase_ , use_past=lowerCamelCase_ )
if not getattr(self._config , "pad_token_id" , lowerCamelCase_ ):
# TODO: how to do that better?
UpperCamelCase__ = 0
@property
def lowerCamelCase__ ( self :Dict ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
UpperCamelCase__ = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowerCamelCase_ , direction="inputs" , inverted_values_shape=lowerCamelCase_ )
UpperCamelCase__ = {0: "batch", 1: "past_sequence + sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCamelCase__ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
return self._config.n_layer
@property
def lowerCamelCase__ ( self :Tuple ) -> int:
"""simple docstring"""
return self._config.n_head
@property
def lowerCamelCase__ ( self :Optional[Any] ) -> float:
"""simple docstring"""
return 1e-3
def lowerCamelCase__ ( self :Dict , lowerCamelCase_ :"PreTrainedTokenizer" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase__ = super(lowerCamelCase_ , self ).generate_dummy_inputs(
lowerCamelCase_ , batch_size=lowerCamelCase_ , seq_length=lowerCamelCase_ , is_pair=lowerCamelCase_ , framework=lowerCamelCase_ )
# We need to order the input in the way they appears in the forward()
UpperCamelCase__ = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase__ , UpperCamelCase__ = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
UpperCamelCase__ = seqlen + 2
UpperCamelCase__ = self._config.hidden_size // self.num_attention_heads
UpperCamelCase__ = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
UpperCamelCase__ = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
UpperCamelCase__ = [
(torch.zeros(lowerCamelCase_ ), torch.zeros(lowerCamelCase_ )) for _ in range(self.num_layers )
]
UpperCamelCase__ = common_inputs["attention_mask"]
if self.use_past:
UpperCamelCase__ = ordered_inputs["attention_mask"].dtype
UpperCamelCase__ = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCamelCase_ , lowerCamelCase_ , dtype=lowerCamelCase_ )] , dim=1 )
return ordered_inputs
@property
def lowerCamelCase__ ( self :int ) -> int:
"""simple docstring"""
return 1_3
| 516 | 1 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
_A = logging.get_logger(__name__)
def lowercase_ ( ) -> Optional[Any]:
"""simple docstring"""
snake_case = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
snake_case = json.loads(A__ )
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
snake_case = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
snake_case = json.loads(A__ )
if not mpi_options.get("sagemaker_mpi_enabled" , A__ ):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed" ) is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class lowerCamelCase ( A_ ):
UpperCAmelCase__ : str = field(
default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
def UpperCAmelCase(self : int ) -> Tuple:
super().__post_init__()
warnings.warn(
"`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
"`TrainingArguments` instead." , _A , )
@cached_property
def UpperCAmelCase(self : Optional[int] ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
snake_case = torch.device("cpu" )
snake_case = 0
elif is_sagemaker_model_parallel_available():
snake_case = smp.local_rank()
snake_case = torch.device("cuda" , _A )
snake_case = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
snake_case = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
snake_case = torch.device("cuda" , self.local_rank )
snake_case = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
snake_case = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
snake_case = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
snake_case = torch.device("cuda" , self.local_rank )
snake_case = 1
if device.type == "cuda":
torch.cuda.set_device(_A )
return device
@property
def UpperCAmelCase(self : Optional[Any] ) -> int:
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def UpperCAmelCase(self : List[str] ) -> Dict:
return not is_sagemaker_model_parallel_available()
@property
def UpperCAmelCase(self : Optional[Any] ) -> Dict:
return False
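# Hedged illustration (not part of the original module) of how the availability check
# above reads its inputs; the values below are made up:
#
#   import json, os
#   os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
#   os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
#   # With both variables set and the `smdistributed` package importable, the
#   # model-parallel branch of the device setup above is taken.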
| 294 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowercase_ ( A__ , A__ , A__ , A__ , A__ , A__ = None , ) -> List[Any]:
"""simple docstring"""
snake_case = {}
if train_file is not None:
snake_case = [train_file]
if eval_file is not None:
snake_case = [eval_file]
if test_file is not None:
snake_case = [test_file]
snake_case = datasets.load_dataset("csv" , data_files=A__ )
snake_case = list(ds[list(files.keys() )[0]].features.keys() )
snake_case = features_name.pop(A__ )
snake_case = list(set(ds[list(files.keys() )[0]][label_name] ) )
snake_case = {label: i for i, label in enumerate(A__ )}
snake_case = tokenizer.model_input_names
snake_case = {}
if len(A__ ) == 1:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=A__ , max_length=A__ , padding="max_length" ) , batched=A__ , )
elif len(A__ ) == 2:
for k in files.keys():
snake_case = ds[k].map(
lambda A__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=A__ , max_length=A__ , padding="max_length" , ) , batched=A__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
snake_case = {k: v for k, v in ex.items() if k in input_names}
snake_case = labelaid[ex[label_name]]
yield (d, label)
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
snake_case = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
snake_case = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
snake_case = (
tf.data.Dataset.from_generator(
A__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
snake_case = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_A = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : int = field(metadata={"help": "Which column contains the label"} )
UpperCAmelCase__ : str = field(default=A_ , metadata={"help": "The path of the training file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the development file"} )
UpperCAmelCase__ : Optional[str] = field(default=A_ , metadata={"help": "The path of the test file"} )
UpperCAmelCase__ : int = field(
default=1_28 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
@dataclass
class lowerCamelCase :
UpperCAmelCase__ : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase__ : Optional[str] = field(
default=A_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def lowercase_ ( ) -> Dict:
"""simple docstring"""
snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case , snake_case , snake_case = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case , snake_case , snake_case , snake_case = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=A__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , )
def compute_metrics(A__ ) -> Dict:
snake_case = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case = TFTrainer(
model=A__ , args=A__ , train_dataset=A__ , eval_dataset=A__ , compute_metrics=A__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case = trainer.evaluate()
snake_case = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(A__ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(A__ )
return results
if __name__ == "__main__":
main()
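# Hedged usage note: this script is meant to be launched from the command line. The
# file name and flag spellings below follow the upstream example script and are
# assumptions here, since the dataclass field names above are obfuscated.
#
#   python run_tf_text_classification.py \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --model_name_or_path bert-base-uncased \
#     --output_dir ./out --do_train --do_eval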
| 294 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase__ : List[Any] = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowercase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase_ : str = '''wavlm'''
def __init__( self ,lowerCamelCase_=32 ,lowerCamelCase_=768 ,lowerCamelCase_=12 ,lowerCamelCase_=12 ,lowerCamelCase_=3072 ,lowerCamelCase_="gelu" ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.0 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.1 ,lowerCamelCase_=0.02 ,lowerCamelCase_=1e-5 ,lowerCamelCase_="group" ,lowerCamelCase_="gelu" ,lowerCamelCase_=(512, 512, 512, 512, 512, 512, 512) ,lowerCamelCase_=(5, 2, 2, 2, 2, 2, 2) ,lowerCamelCase_=(10, 3, 3, 3, 3, 2, 2) ,lowerCamelCase_=False ,lowerCamelCase_=128 ,lowerCamelCase_=16 ,lowerCamelCase_=320 ,lowerCamelCase_=800 ,lowerCamelCase_=False ,lowerCamelCase_=True ,lowerCamelCase_=0.05 ,lowerCamelCase_=10 ,lowerCamelCase_=2 ,lowerCamelCase_=0.0 ,lowerCamelCase_=10 ,lowerCamelCase_=320 ,lowerCamelCase_=2 ,lowerCamelCase_=0.1 ,lowerCamelCase_=100 ,lowerCamelCase_=256 ,lowerCamelCase_=256 ,lowerCamelCase_=0.1 ,lowerCamelCase_="mean" ,lowerCamelCase_=False ,lowerCamelCase_=False ,lowerCamelCase_=256 ,lowerCamelCase_=(512, 512, 512, 512, 1500) ,lowerCamelCase_=(5, 3, 3, 1, 1) ,lowerCamelCase_=(1, 2, 3, 1, 1) ,lowerCamelCase_=512 ,lowerCamelCase_=80 ,lowerCamelCase_=0 ,lowerCamelCase_=1 ,lowerCamelCase_=2 ,lowerCamelCase_=False ,lowerCamelCase_=3 ,lowerCamelCase_=2 ,lowerCamelCase_=3 ,lowerCamelCase_=None ,**lowerCamelCase_ ,) -> int:
'''simple docstring'''
super().__init__(**lowerCamelCase_ ,pad_token_id=lowerCamelCase_ ,bos_token_id=lowerCamelCase_ ,eos_token_id=lowerCamelCase_ )
UpperCAmelCase__ : Tuple = hidden_size
UpperCAmelCase__ : Any = feat_extract_norm
UpperCAmelCase__ : Union[str, Any] = feat_extract_activation
UpperCAmelCase__ : Any = list(lowerCamelCase_ )
UpperCAmelCase__ : Tuple = list(lowerCamelCase_ )
UpperCAmelCase__ : int = list(lowerCamelCase_ )
UpperCAmelCase__ : List[str] = conv_bias
UpperCAmelCase__ : Optional[Any] = num_buckets
UpperCAmelCase__ : Any = max_bucket_distance
UpperCAmelCase__ : Union[str, Any] = num_conv_pos_embeddings
UpperCAmelCase__ : List[Any] = num_conv_pos_embedding_groups
UpperCAmelCase__ : List[Any] = len(self.conv_dim )
UpperCAmelCase__ : str = num_hidden_layers
UpperCAmelCase__ : Any = intermediate_size
UpperCAmelCase__ : Optional[Any] = hidden_act
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Optional[Any] = hidden_dropout
UpperCAmelCase__ : Union[str, Any] = attention_dropout
UpperCAmelCase__ : Dict = activation_dropout
UpperCAmelCase__ : Union[str, Any] = feat_proj_dropout
UpperCAmelCase__ : List[Any] = final_dropout
UpperCAmelCase__ : Tuple = layerdrop
UpperCAmelCase__ : Any = layer_norm_eps
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Optional[int] = num_ctc_classes
UpperCAmelCase__ : Union[str, Any] = vocab_size
UpperCAmelCase__ : Union[str, Any] = do_stable_layer_norm
UpperCAmelCase__ : Union[str, Any] = use_weighted_layer_sum
UpperCAmelCase__ : Union[str, Any] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase__ : Optional[Any] = apply_spec_augment
UpperCAmelCase__ : List[str] = mask_time_prob
UpperCAmelCase__ : int = mask_time_length
UpperCAmelCase__ : List[Any] = mask_time_min_masks
UpperCAmelCase__ : Optional[int] = mask_feature_prob
UpperCAmelCase__ : List[Any] = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCAmelCase__ : Optional[int] = num_codevectors_per_group
UpperCAmelCase__ : List[Any] = num_codevector_groups
UpperCAmelCase__ : int = contrastive_logits_temperature
UpperCAmelCase__ : Any = num_negatives
UpperCAmelCase__ : Optional[Any] = codevector_dim
UpperCAmelCase__ : str = proj_codevector_dim
UpperCAmelCase__ : Union[str, Any] = diversity_loss_weight
# ctc loss
UpperCAmelCase__ : List[Any] = ctc_loss_reduction
UpperCAmelCase__ : Tuple = ctc_zero_infinity
# adapter
UpperCAmelCase__ : List[Any] = add_adapter
UpperCAmelCase__ : Dict = adapter_kernel_size
UpperCAmelCase__ : Optional[int] = adapter_stride
UpperCAmelCase__ : Dict = num_adapter_layers
UpperCAmelCase__ : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase__ : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase__ : Any = list(lowerCamelCase_ )
UpperCAmelCase__ : Tuple = list(lowerCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = list(lowerCamelCase_ )
UpperCAmelCase__ : Tuple = xvector_output_dim
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
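# Hedged usage sketch, assuming the upstream names (`WavLMConfig` and its
# `inputs_to_logits_ratio` property, which the reduce above implements):
#
#   from transformers import WavLMConfig
#   config = WavLMConfig()  # conv_stride defaults to (5, 2, 2, 2, 2, 2, 2)
#   print(config.inputs_to_logits_ratio)  # 5 * 2**6 = 320, the encoder's downsampling factor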
| 614 | '''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
UpperCamelCase__ : str = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : str = Github(os.environ['''GITHUB_TOKEN'''] )
UpperCAmelCase__ : Union[str, Any] = g.get_repo('''huggingface/diffusers''' )
UpperCAmelCase__ : Union[str, Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
UpperCAmelCase__ : int = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A )
UpperCAmelCase__ : int = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
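# Hedged usage note: the script reads a token from the environment and is normally
# run on a schedule (e.g. a daily CI job); the file name and invocation below are
# illustrative only.
#
#   GITHUB_TOKEN=<personal-access-token> python close_stale_issues.py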
| 614 | 1 |
"""simple docstring"""
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = r'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class __lowercase ( __lowerCamelCase ):
@add_start_docstrings(A )
def __call__( self : List[Any] ,A : torch.LongTensor ,A : torch.FloatTensor ,**A : Tuple ):
'''simple docstring'''
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class __lowercase ( __lowerCamelCase ):
def __init__( self : int ,A : int ,A : Optional[int] = None ):
'''simple docstring'''
UpperCAmelCase__ : Dict = max_length
UpperCAmelCase__ : Dict = max_position_embeddings
@add_start_docstrings(A )
def __call__( self : Optional[Any] ,A : torch.LongTensor ,A : torch.FloatTensor ,**A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = input_ids.shape[-1]
UpperCAmelCase__ : List[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class __lowercase ( __lowerCamelCase ):
def __init__( self : Dict ,A : int ,A : int ):
'''simple docstring'''
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
"""with `max_length = start_length + max_new_tokens` instead.""" ,A ,)
UpperCAmelCase__ : Union[str, Any] = start_length
UpperCAmelCase__ : Optional[int] = max_new_tokens
UpperCAmelCase__ : Dict = start_length + max_new_tokens
@add_start_docstrings(A )
def __call__( self : Dict ,A : torch.LongTensor ,A : torch.FloatTensor ,**A : int ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class __lowercase ( __lowerCamelCase ):
def __init__( self : int ,A : float ,A : Optional[float] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = max_time
UpperCAmelCase__ : Optional[int] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(A )
def __call__( self : Dict ,A : torch.LongTensor ,A : torch.FloatTensor ,**A : List[Any] ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class __lowercase ( __lowerCamelCase ):
@add_start_docstrings(A )
def __call__( self : List[str] ,A : torch.LongTensor ,A : torch.FloatTensor ,**A : Tuple ):
'''simple docstring'''
return any(criteria(A ,A ) for criteria in self )
@property
def __lowercase ( self : int ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(A ,A ):
return stopping_criterium.max_length
elif isinstance(A ,A ):
return stopping_criterium.max_length
return None
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Any = stopping_criteria.max_length
UpperCAmelCase__ : str = deepcopy(__UpperCamelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , __UpperCamelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=__UpperCamelCase ) )
return new_stopping_criteria
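# Hedged usage sketch with the upstream `transformers` names (the classes in this
# file are obfuscated, so the import below is an assumption based on the matching
# upstream module):
#
#   from transformers import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64), MaxTimeCriteria(max_time=5.0)])
#   # During generation, the list's __call__ returns True (stop) as soon as any
#   # single criterion fires for the current input_ids/scores.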
| 194 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__UpperCAmelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__UpperCAmelCase = {
'allenai/led-base-16384': 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase__ : int = bs[:]
UpperCAmelCase__ : Union[str, Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__UpperCamelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase__ : Tuple = [chr(__UpperCamelCase ) for n in cs]
return dict(zip(__UpperCamelCase , __UpperCamelCase ) )
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = set()
UpperCAmelCase__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase__ : Optional[Any] = char
return pairs
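# Hedged worked example for the helper above (upstream name: `get_pairs`). For the
# symbol tuple ("a", "d", "a", "p", "t") it yields every adjacent pair:
#
#   {("a", "d"), ("d", "a"), ("a", "p"), ("p", "t")}
#
# The BPE loop below repeatedly merges the lowest-ranked of these pairs.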
class __lowercase ( __lowerCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Union[str, Any] ,A : Any ,A : Dict ,A : Optional[Any]="replace" ,A : Dict="<s>" ,A : str="</s>" ,A : str="</s>" ,A : Dict="<s>" ,A : List[str]="<unk>" ,A : Union[str, Any]="<pad>" ,A : Any="<mask>" ,A : str=False ,**A : Optional[Any] ,):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else bos_token
UpperCAmelCase__ : Any = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else eos_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else sep_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else cls_token
UpperCAmelCase__ : Tuple = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else unk_token
UpperCAmelCase__ : List[str] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase__ : Union[str, Any] = AddedToken(A ,lstrip=A ,rstrip=A ) if isinstance(A ,A ) else mask_token
super().__init__(
errors=A ,bos_token=A ,eos_token=A ,unk_token=A ,sep_token=A ,cls_token=A ,pad_token=A ,mask_token=A ,add_prefix_space=A ,**A ,)
with open(A ,encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase__ : Tuple = json.load(A )
UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[Any] = errors # how to handle errors in decoding
UpperCAmelCase__ : List[str] = bytes_to_unicode()
UpperCAmelCase__ : int = {v: k for k, v in self.byte_encoder.items()}
with open(A ,encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase__ : List[Any] = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase__ : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase__ : Any = dict(zip(A ,range(len(A ) ) ) )
UpperCAmelCase__ : Optional[Any] = {}
UpperCAmelCase__ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase__ : Union[str, Any] = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowercase ( self : Optional[int] ,A : Union[str, Any] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase__ : Optional[Any] = tuple(A )
UpperCAmelCase__ : int = get_pairs(A )
if not pairs:
return token
while True:
UpperCAmelCase__ : str = min(A ,key=lambda A : self.bpe_ranks.get(A ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram
UpperCAmelCase__ : Optional[Any] = []
UpperCAmelCase__ : Any = 0
while i < len(A ):
try:
UpperCAmelCase__ : Optional[Any] = word.index(A ,A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase__ : int = j
if word[i] == first and i < len(A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase__ : List[str] = tuple(A )
UpperCAmelCase__ : str = new_word
if len(A ) == 1:
break
else:
UpperCAmelCase__ : str = get_pairs(A )
UpperCAmelCase__ : int = """ """.join(A )
UpperCAmelCase__ : List[str] = word
return word
def __lowercase ( self : Optional[Any] ,A : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
for token in re.findall(self.pat ,A ):
UpperCAmelCase__ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A ).split(""" """ ) )
return bpe_tokens
def __lowercase ( self : Dict ,A : int ):
'''simple docstring'''
return self.encoder.get(A ,self.encoder.get(self.unk_token ) )
def __lowercase ( self : Dict ,A : Optional[Any] ):
'''simple docstring'''
return self.decoder.get(A )
def __lowercase ( self : Optional[Any] ,A : int ):
'''simple docstring'''
UpperCAmelCase__ : int = """""".join(A )
UpperCAmelCase__ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def __lowercase ( self : Optional[int] ,A : str ,A : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase__ : Any = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase__ : List[str] = os.path.join(
A ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(A ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A ,ensure_ascii=A ) + """\n""" )
UpperCAmelCase__ : Any = 0
with open(A ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase__ : Optional[int] = token_index
writer.write(""" """.join(A ) + """\n""" )
index += 1
return vocab_file, merge_file
def __lowercase ( self : Union[str, Any] ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Dict = [self.cls_token_id]
UpperCAmelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,A : List[int] ,A : Optional[List[int]] = None ,A : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A ,token_ids_a=A ,already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1]
def __lowercase ( self : Tuple ,A : List[int] ,A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowercase ( self : Any ,A : str ,A : List[Any]=False ,**A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A ) > 0 and not text[0].isspace()):
UpperCAmelCase__ : Dict = """ """ + text
return (text, kwargs)
def __lowercase ( self : Dict ,A : Union[Dict[str, EncodedInput], BatchEncoding] ,A : Optional[int] = None ,A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,A : Optional[int] = None ,A : Optional[bool] = None ,):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super()._pad(
encoded_inputs=A ,max_length=A ,padding_strategy=A ,pad_to_multiple_of=A ,return_attention_mask=A ,)
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : str = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : Optional[int] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Tuple = len(encoded_inputs["""global_attention_mask"""] ) != len(A )
if needs_to_be_padded:
UpperCAmelCase__ : List[Any] = len(A ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Tuple = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
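# Hedged usage sketch, assuming the upstream class name `LEDTokenizer` (the class
# above is obfuscated):
#
#   from transformers import LEDTokenizer
#   tok = LEDTokenizer.from_pretrained("allenai/led-base-16384")
#   enc = tok("Hello world", padding="max_length", max_length=8)
#   # If a `global_attention_mask` is present, the `_pad` override above extends it
#   # with -1, because 0 already means "local attention" for LED.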
| 194 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCamelCase :int = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def __call__( self , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE__ ):
_UpperCamelCase :Optional[int] = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
_UpperCamelCase :Tuple = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and len(SCREAMING_SNAKE_CASE__ ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE__ )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
_UpperCamelCase :Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCamelCase :List[str] = self.unet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCamelCase :Optional[Any] = self.scheduler.step(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ , use_clipped_model_output=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
_UpperCamelCase :str = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCamelCase :int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCamelCase :Union[str, Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
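# Hedged usage sketch with the public `diffusers` names (the class above is
# obfuscated; `DDIMPipeline` and the checkpoint are assumptions based on the
# matching upstream pipeline):
#
#   from diffusers import DDIMPipeline
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#   image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]
#   # eta=0.0 gives deterministic DDIM sampling; eta=1.0 injects DDPM-like noise.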
| 355 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=4_00 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Dict = size if size is not None else {'''shortest_edge''': 20}
_UpperCamelCase :Union[str, Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_UpperCamelCase :str = parent
_UpperCamelCase :str = batch_size
_UpperCamelCase :str = num_channels
_UpperCamelCase :Optional[Any] = image_size
_UpperCamelCase :List[str] = min_resolution
_UpperCamelCase :Tuple = max_resolution
_UpperCamelCase :List[Any] = do_resize
_UpperCamelCase :Tuple = size
_UpperCamelCase :int = do_center_crop
_UpperCamelCase :Optional[int] = crop_size
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A( lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
A = MobileNetVaImageProcessor if is_vision_available() else None
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :int = MobileNetVaImageProcessingTester(self )
@property
def _UpperCamelCase( self ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''crop_size''' ) )
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_UpperCamelCase :List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
pass
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
_UpperCamelCase :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase :Any = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCamelCase :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
# Test not batched input
_UpperCamelCase :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase :List[str] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
_UpperCamelCase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCamelCase :Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
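# Hedged usage sketch of the processor under test, assuming the upstream name
# `MobileNetV1ImageProcessor`:
#
#   from transformers import MobileNetV1ImageProcessor
#   proc = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#   pixel_values = proc(images=image, return_tensors="pt").pixel_values  # shape (1, 3, 18, 18)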
| 355 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case ( _UpperCamelCase , unittest.TestCase):
__UpperCamelCase = CTRLTokenizer
__UpperCamelCase = False
__UpperCamelCase = False
def a_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_A = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
_A = dict(zip(a__ , range(len(a__ ) ) ) )
_A = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
_A = {"unk_token": "<unk>"}
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a__ ) )
def a_ ( self : str , **a__ : str ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self : List[str] , a__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = "adapt react readapt apt"
_A = "adapt react readapt apt"
return input_text, output_text
def a_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_A = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_A = "adapt react readapt apt"
_A = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
_A = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_A = tokens + [tokenizer.unk_token]
_A = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
| 621 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ = logging.get_logger(__name__)
class snake_case ( _UpperCamelCase):
__UpperCamelCase = ['input_features']
def __init__( self : int , a__ : Optional[Any]=80 , a__ : Optional[int]=1_60_00 , a__ : int=1_60 , a__ : Union[str, Any]=30 , a__ : Tuple=4_00 , a__ : List[Any]=0.0 , a__ : Optional[Any]=False , **a__ : List[Any] , ) -> str:
'''simple docstring'''
super().__init__(
feature_size=a__ , sampling_rate=a__ , padding_value=a__ , return_attention_mask=a__ , **a__ , )
_A = n_fft
_A = hop_length
_A = chunk_length
_A = chunk_length * sampling_rate
_A = self.n_samples // hop_length
_A = sampling_rate
_A = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=a__ , norm="slaney" , mel_scale="slaney" , )
def a_ ( self : int , a__ : np.array ) -> np.ndarray:
'''simple docstring'''
_A = spectrogram(
a__ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
_A = log_spec[:, :-1]
_A = np.maximum(a__ , log_spec.max() - 8.0 )
_A = (log_spec + 4.0) / 4.0
return log_spec
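# Hedged numerical note on the scaling above: clamping to `log_spec.max() - 8.0`
# bounds the dynamic range to 8 log10 units (80 dB), and `(log_spec + 4.0) / 4.0`
# maps that 8-unit span onto a span of 2, which lands roughly in [-1, 1] for
# typical speech; this matches the original Whisper preprocessing.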
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def a_ ( a__ : List[np.ndarray] , a__ : List[np.ndarray] , a__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
_A = np.array(a__ , np.intaa )
_A = []
for vector, length in zip(a__ , attention_mask.sum(-1 ) ):
_A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
_A = padding_value
normed_input_values.append(a__ )
else:
_A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] , a__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a__ : bool = True , a__ : Optional[int] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : Optional[bool] = None , a__ : Optional[str] = "max_length" , a__ : Optional[int] = None , a__ : Optional[int] = None , a__ : Optional[bool] = None , **a__ : Dict , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
_A = isinstance(a__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_A = is_batched_numpy or (
isinstance(a__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_A = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a__ , np.ndarray ):
_A = np.asarray(a__ , dtype=np.floataa )
elif isinstance(a__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_A = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_A = [np.asarray([raw_speech] ).T]
_A = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
_A = self.pad(
a__ , padding=a__ , max_length=max_length if max_length else self.n_samples , truncation=a__ , pad_to_multiple_of=a__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_A = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
_A = np.stack(padded_inputs["input_features"] , axis=0 )
# make sure list is in array format
_A = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 )
_A = [self._np_extract_fbank_features(a__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , a__ ):
_A = [np.asarray(a__ , dtype=np.floataa ) for feature in input_features]
else:
_A = input_features
if return_attention_mask:
# rescale from sample (480000) to feature (3000)
_A = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
_A = padded_inputs.convert_to_tensors(a__ )
return padded_inputs
def a_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
_A = copy.deepcopy(self.__dict__ )
_A = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 621 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        '''simple docstring'''
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        '''simple docstring'''
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
| 109 |
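# Editor's sketch: instantiating a small CodeGen config and model (assumption:
# the class above corresponds to transformers' CodeGenConfig).
from transformers import CodeGenConfig, CodeGenModel

config = CodeGenConfig(n_embd=64, n_layer=2, n_head=4, rotary_dim=16)
model = CodeGenModel(config)  # randomly initialised, no checkpoint download
print(model.config.model_type)  # "codegen"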
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
__SCREAMING_SNAKE_CASE = CLIPImageProcessor(**lowerCamelCase )
# save in new folder
model_config.save_pretrained(lowerCamelCase )
config.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
# make sure private variable is not incorrectly saved
__SCREAMING_SNAKE_CASE = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,"""clip-base is not a local folder and is not a valid model identifier""" ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""clip-base""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,revision="""aaaaaa""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCamelCase ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase ,trust_remote_code=lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCamelCase ):
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """preprocessor_config.json"""
__SCREAMING_SNAKE_CASE = Path(lowerCamelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(lowerCamelCase ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(lowerCamelCase ,"""w""" ) )
__SCREAMING_SNAKE_CASE = CustomImageProcessor.from_pretrained(lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(lowerCamelCase )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
class __a ( _snake_case ):
__UpperCamelCase : int = True
try:
AutoConfig.register("""custom""" ,lowerCamelCase )
AutoImageProcessor.register(lowerCamelCase ,lowerCamelCase )
# If remote code is not set, the default is to use local
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__SCREAMING_SNAKE_CASE = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCamelCase ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 109 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """simple docstring"""
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 701 |
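# Editor's check: the first Fibonacci number with three digits is F(12) = 144.
assert solution(3) == 12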
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    """simple docstring"""
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")
    return name
def convert_state_dict(orig_state_dict, config):
    """simple docstring"""
    # NOTE: the target key names below follow the HF GroupViT module naming;
    # adjust if the checkpoint layout differs.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                val = val.squeeze_()
            orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    """simple docstring"""
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''')
parser.add_argument(
'''--model_name''',
        default="groupvit-gcc-yfcc",
type=str,
help='''Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''',
)
    args = parser.parse_args()
    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 285 | 0 |
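# Editor's check of the key renaming above (no checkpoint needed); the sample
# key follows the GroupViT checkpoint naming assumed by rename_key.
sample_key = "img_encoder.layers.0.blocks.0.norm1.weight"
print(rename_key(sample_key))
# -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"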
'''simple docstring'''
def solution(power: int = 1000) -> int:
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 92 |
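# Editor's check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26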
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
| 50 | 0 |
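# Editor's sketch: round-trip a tiny state dict through convert(); the file
# names are illustrative.
import torch

torch.save({"w": torch.ones(2, 2)}, "tiny.bin")
convert("tiny.bin", save_path="tiny_fp16.bin")
assert torch.load("tiny_fp16.bin")["w"].dtype == torch.float16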
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        # scheduler flags below are assumed from the usual fast-test setup
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask (the zeroed region below is an assumed layout)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        # the zeroed region below is an assumed mask layout
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 151 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 151 | 1 |
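# Editor's check: this matches the published Project Euler 205 answer.
assert solution() == 0.5731441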
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
SCREAMING_SNAKE_CASE_ = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(SCREAMING_SNAKE_CASE_)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache
        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(
        cls, question_encoder_config: PretrainedConfig, generator_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        '''simple docstring'''
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 523 |
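# Editor's sketch mirroring the assert in __init__ above; uses the published
# transformers classes rather than this file's relative imports.
from transformers import BartConfig, DPRConfig, RagConfig

rag_config = RagConfig(
    question_encoder=DPRConfig().to_dict(),
    generator=BartConfig().to_dict(),
)
print(rag_config.n_docs)  # 5 documents retrieved by default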
'''simple docstring'''
def __get_demo_graph(index: int) -> dict:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 523 | 1 |
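# Editor's check with the first demo graph above: removing any of these
# edges disconnects the graph.
assert sorted(compute_bridges(__get_demo_graph(0))) == [(2, 3), (2, 5), (3, 4)]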
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    '''simple docstring'''

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]
def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
    def encode(self, image, label):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        # threshold logits into a binary mask (assumed thresholds)
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 713 |
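# Editor's sketch using the underlying checkpoint directly -- CLIPSegProcessor
# and CLIPSegForImageSegmentation are the public transformers classes the tool
# above wraps (output shape comment is approximate).
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.new("RGB", (352, 352))
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # mask logits over a 352x352 grid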
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207 | 0 |
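# Editor's cross-check with a self-contained Moebius implementation by trial
# division (mobius_ref is a hypothetical helper, not part of the module above).
def mobius_ref(n: int) -> int:
    result, d = 1, 2
    while d * d <= n:
        if n % d == 0:
            n //= d
            if n % d == 0:  # squared prime factor -> mu(n) = 0
                return 0
            result = -result
        d += 1
    return -result if n > 1 else result

assert [mobius_ref(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]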
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        '''simple docstring'''
        scores = jnp.ones((batch_size, length)) / length
        return scores
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =20
UpperCAmelCase_ =self._get_uniform_logits(batch_size=2 , length=_lowerCAmelCase )
# tweak scores to not be uniform anymore
UpperCAmelCase_ =scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
UpperCAmelCase_ =scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
UpperCAmelCase_ =jax.nn.softmax(_lowerCAmelCase , axis=-1 )
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=1.3 )
UpperCAmelCase_ =jax.nn.softmax(temp_dist_warper_sharper(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
UpperCAmelCase_ =jax.nn.softmax(temp_dist_warper_smoother(_lowerCAmelCase , scores.copy() , cur_len=_lowerCAmelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def lowerCAmelCase__ ( self: Optional[Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =10
UpperCAmelCase_ =2
# create ramp distribution
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy()
UpperCAmelCase_ =ramp_logits[1:, : vocab_size // 2] + vocab_size
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
UpperCAmelCase_ =5
UpperCAmelCase_ =FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, length) ).copy()
UpperCAmelCase_ =top_k_warp_safety_check(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def lowerCAmelCase__ ( self: Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =None
UpperCAmelCase_ =10
UpperCAmelCase_ =2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
UpperCAmelCase_ =np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
UpperCAmelCase_ =np.exp(top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
UpperCAmelCase_ =np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# check edge cases with negative and extreme logits
UpperCAmelCase_ =np.broadcast_to(np.arange(_lowerCAmelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
UpperCAmelCase_ =ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
# check that min length is applied at length 5
UpperCAmelCase_ =ids_tensor((batch_size, 20) , vocab_size=20 )
UpperCAmelCase_ =5
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =15
UpperCAmelCase_ =min_dist_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: Any ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the bos_token_id score
UpperCAmelCase_ =ids_tensor((batch_size, 1) , vocab_size=20 )
UpperCAmelCase_ =1
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
UpperCAmelCase_ =3
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =20
UpperCAmelCase_ =4
UpperCAmelCase_ =0
UpperCAmelCase_ =5
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
UpperCAmelCase_ =ids_tensor((batch_size, 4) , vocab_size=20 )
UpperCAmelCase_ =4
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
UpperCAmelCase_ =3
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =logits_processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
self.assertFalse(jnp.isinf(_lowerCAmelCase ).any() )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =4
UpperCAmelCase_ =10
UpperCAmelCase_ =15
UpperCAmelCase_ =2
UpperCAmelCase_ =1
UpperCAmelCase_ =15
# dummy input_ids and scores
UpperCAmelCase_ =ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
UpperCAmelCase_ =input_ids.copy()
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =scores.copy()
# instantiate all dist processors
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =10
# no processor list
UpperCAmelCase_ =temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# with processor list
UpperCAmelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ =processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def lowerCAmelCase__ ( self: str ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =4
UpperCAmelCase_ =10
UpperCAmelCase_ =15
UpperCAmelCase_ =2
UpperCAmelCase_ =1
UpperCAmelCase_ =15
# dummy input_ids and scores
UpperCAmelCase_ =ids_tensor((batch_size, sequence_length) , _lowerCAmelCase )
UpperCAmelCase_ =input_ids.copy()
UpperCAmelCase_ =self._get_uniform_logits(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =scores.copy()
# instantiate all dist processors
UpperCAmelCase_ =FlaxTemperatureLogitsWarper(temperature=0.5 )
UpperCAmelCase_ =FlaxTopKLogitsWarper(3 )
UpperCAmelCase_ =FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
UpperCAmelCase_ =FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCAmelCase , eos_token_id=_lowerCAmelCase )
UpperCAmelCase_ =10
# no processor list
def run_no_processor_list(_lowerCAmelCase: int , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Dict ):
UpperCAmelCase_ =temp_dist_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_k_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =top_p_warp(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =min_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =bos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
UpperCAmelCase_ =eos_dist_proc(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
# with processor list
def run_processor_list(_lowerCAmelCase: Optional[int] , _lowerCAmelCase: Any , _lowerCAmelCase: Optional[int] ):
UpperCAmelCase_ =FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
UpperCAmelCase_ =processor(_lowerCAmelCase , _lowerCAmelCase , cur_len=_lowerCAmelCase )
return scores
UpperCAmelCase_ =jax.jit(_lowerCAmelCase )
UpperCAmelCase_ =jax.jit(_lowerCAmelCase )
UpperCAmelCase_ =jitted_run_no_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =jitted_run_processor_list(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
| 54 |
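# Editor's sketch of chaining the warpers tested above via the public Flax
# generation API in transformers.
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=3)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
scores = jnp.ones((1, 10)) / 10
warped = processors(input_ids, scores, cur_len=4)
print(int(jnp.isinf(warped).sum()))  # 7 of 10 entries filtered by top-k=3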
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
lowerCAmelCase = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 370 | 0 |
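# Editor's sketch (assumption: the class above matches transformers'
# AlbertConfig; sizes shrunk for a quick instantiation).
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(hidden_size=64, num_attention_heads=4, intermediate_size=128, num_hidden_layers=2)
model = AlbertModel(config)  # randomly initialised ALBERT
print(model.config.model_type)  # "albert"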
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
snake_case : Optional[int] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
snake_case : List[Any] = get_tests_dir('''fixtures/vocab.json''')
snake_case : Optional[Any] = get_tests_dir('''fixtures''')
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def lowerCamelCase__( self :str ) -> Optional[Any]:
a__ = 0
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Dict ) -> int:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig()
a__ = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(__snake_case )
processor.save_pretrained(__snake_case )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[Any] ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__snake_case ,os.path.join(__snake_case ,__snake_case ) )
copyfile(__snake_case ,os.path.join(__snake_case ,'vocab.json' ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Dict ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__snake_case ,__snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in tokenizer
with open(os.path.join(__snake_case ,__snake_case ) ,'r' ) as f:
a__ = json.load(__snake_case )
config_dict.pop('processor_class' )
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write(json.dumps(__snake_case ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaFeatureExtractor()
a__ = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
a__ = WavaVecaProcessor(__snake_case ,__snake_case )
# save in new folder
processor.save_pretrained(__snake_case )
# drop `processor_class` in feature extractor
with open(os.path.join(__snake_case ,__snake_case ) ,'r' ) as f:
a__ = json.load(__snake_case )
config_dict.pop('processor_class' )
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write(json.dumps(__snake_case ) )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :Optional[int] ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
a__ = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
model_config.save_pretrained(__snake_case )
# copy relevant files
copyfile(__snake_case ,os.path.join(__snake_case ,'vocab.json' ) )
            # create empty sample processor
with open(os.path.join(__snake_case ,__snake_case ) ,'w' ) as f:
f.write('{}' )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
def lowerCamelCase__( self :List[str] ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
a__ = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,'NewFeatureExtractor' )
a__ = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizerFast' )
# Test we can also load the slow version
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case ,use_fast=__snake_case )
a__ = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'NewTokenizer' )
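    # The pattern exercised above, in short (requires network access and explicitly
    # opting in to remote code; the repo name is the test fixture used in this file):
    #
    #     processor = AutoProcessor.from_pretrained(
    #         "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
    #     )
    #     # downloads custom_processing.py from the Hub repo and builds NewProcessor from it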
def lowerCamelCase__( self :Tuple ) -> Any:
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
AutoTokenizer.register(__snake_case ,slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case ,__snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoProcessor.register(__snake_case ,__snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__snake_case ,'vocab.txt' )
with open(__snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__snake_case )
a__ = CustomProcessor(__snake_case ,__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__snake_case )
a__ = AutoProcessor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case ,__snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :str ) -> Dict:
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : str = False
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Tuple = False
class snake_case_ (lowerCamelCase_ ):
UpperCAmelCase__ : Dict = '''AutoFeatureExtractor'''
UpperCAmelCase__ : List[str] = '''AutoTokenizer'''
UpperCAmelCase__ : Optional[int] = False
try:
AutoConfig.register('custom' ,__snake_case )
AutoFeatureExtractor.register(__snake_case ,__snake_case )
AutoTokenizer.register(__snake_case ,slow_tokenizer_class=__snake_case )
AutoProcessor.register(__snake_case ,__snake_case )
# If remote code is not set, the default is to use local classes.
a__ = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
a__ = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' ,trust_remote_code=__snake_case )
self.assertEqual(processor.__class__.__name__ ,'NewProcessor' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowerCamelCase__( self :List[str] ) -> List[str]:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ ,'BertTokenizerFast' )
def lowerCamelCase__( self :Dict ) -> int:
a__ = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ ,'ConvNextImageProcessor' )
@is_staging_test
class snake_case_ (unittest.TestCase ):
UpperCAmelCase__ : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def lowerCamelCase__( cls :Optional[int] ) -> Tuple:
a__ = TOKEN
HfFolder.save_token(__snake_case )
@classmethod
def lowerCamelCase__( cls :Union[str, Any] ) -> List[str]:
try:
delete_repo(token=cls._token ,repo_id='test-processor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-processor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='test-dynamic-processor' )
except HTTPError:
pass
def lowerCamelCase__( self :Union[str, Any] ) -> Any:
a__ = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case ,'test-processor' ) ,push_to_hub=__snake_case ,use_auth_token=self._token )
a__ = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case ,getattr(new_processor.feature_extractor ,__snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :List[Any] ) -> List[Any]:
a__ = WavaVecaProcessor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__snake_case ,'test-processor-org' ) ,push_to_hub=__snake_case ,use_auth_token=self._token ,organization='valid_org' ,)
a__ = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__snake_case ,getattr(new_processor.feature_extractor ,__snake_case ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def lowerCamelCase__( self :int ) -> int:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
a__ = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
a__ = os.path.join(__snake_case ,'vocab.txt' )
with open(__snake_case ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
a__ = CustomTokenizer(__snake_case )
a__ = CustomProcessor(__snake_case ,__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' ,token=self._token )
a__ = Repository(__snake_case ,clone_from=F'{USER}/test-dynamic-processor' ,token=self._token )
processor.save_pretrained(__snake_case )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__snake_case ,'tokenizer_config.json' ) ) as f:
a__ = json.load(__snake_case )
self.assertDictEqual(
tokenizer_config['auto_map'] ,{
'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
'AutoProcessor': 'custom_processing.CustomProcessor',
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_feature_extraction.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_tokenization.py' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__snake_case ,'custom_processing.py' ) ) )
repo.push_to_hub()
a__ = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' ,trust_remote_code=__snake_case )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,'CustomProcessor' )
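# A minimal round-trip sketch of what these tests exercise (network needed for the
# first call; the local path is a placeholder):
#
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
#     processor.save_pretrained("./wav2vec2-processor")  # writes tokenizer + feature-extractor files
#     reloaded = AutoProcessor.from_pretrained("./wav2vec2-processor")  # resolves back to a Wav2Vec2Processor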
| 657 |
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1) for n = 0..length - 1."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
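    # Expected output of the two calls above (worked from h(n) = n * (2n - 1)):
    #   hexagonal_numbers(length=5)  -> [0, 1, 6, 15, 28]
    #   hexagonal_numbers(length=10) -> [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]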
| 657 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
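# Worked example of the mapping above (an illustrative original checkpoint key):
#   rename_key("layers.0.blocks.0.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"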
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
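    # Example invocation (a sketch; the script filename and dump path are illustrative):
    #   python convert_focalnet_to_hf_format.py \
    #       --model_name focalnet-tiny \
    #       --pytorch_dump_folder_path ./focalnet-tiny \
    #       --push_to_hub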
| 167 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
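# Usage sketch (run from a directory that picks up this conftest; the report id
# "run1" is arbitrary, and the reports are then written by pytest_terminal_summary_main):
#   python -m pytest tests/some_test.py --make-reports=run1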
| 513 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
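# A minimal usage sketch (column names are illustrative, e.g. CNN/DailyMail-style data):
#   task = Summarization(text_column="article", summary_column="highlights")
#   task.column_mapping  # -> {"article": "text", "highlights": "summary"}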
| 701 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int=32 , _lowerCamelCase : str=10 , _lowerCamelCase : Dict=100 , _lowerCamelCase : int=1_026 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : str="data/tokenized_stories_train_wikitext103.jbl" , _lowerCamelCase : Any="igf_context_pairs.jbl" , ) -> str:
'''simple docstring'''
set_seed(3)
# generate train_data and objective_set
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = generate_datasets(
_lowerCamelCase , _lowerCamelCase , number=_lowerCamelCase , min_len=1_026 , trim=_lowerCamelCase)
# keeps model same across runs
set_seed(4)
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__UpperCamelCase : Dict = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# load pretrained model
__UpperCamelCase : str = load_gpta("gpt2").to(_lowerCamelCase)
print("computing perplexity on objective set")
__UpperCamelCase : Union[str, Any] = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase).item()
print("perplexity on objective set:" , _lowerCamelCase)
# collect igf pairs and save to file demo.jbl
collect_objective_set(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any=15 , _lowerCamelCase : Union[str, Any]=128 , _lowerCamelCase : Any=100 , _lowerCamelCase : List[Any]="igf_model.pt" , ) -> Any:
'''simple docstring'''
set_seed(42)
# Load pre-trained model
__UpperCamelCase : int = GPTaLMHeadModel.from_pretrained("gpt2")
# Initialize secondary learner to use embedding weights of model
__UpperCamelCase : Any = SecondaryLearner(_lowerCamelCase)
# Train secondary learner
__UpperCamelCase : Union[str, Any] = train_secondary_learner(
_lowerCamelCase , _lowerCamelCase , max_epochs=_lowerCamelCase , batch_size=_lowerCamelCase , eval_freq=100 , igf_model_path=_lowerCamelCase , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int=32 , _lowerCamelCase : Tuple=1_000 , _lowerCamelCase : Dict=16 , _lowerCamelCase : Union[str, Any]=1.0 , _lowerCamelCase : Optional[Any]=recopy_gpta , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[Any]=10 , _lowerCamelCase : Union[str, Any]="gpt2_finetuned.pt" , ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
__UpperCamelCase : List[Any] = RandomSampler(_lowerCamelCase)
__UpperCamelCase : Any = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase)
__UpperCamelCase : Tuple = max_steps // (len(_lowerCamelCase)) + 1
__UpperCamelCase : List[Any] = 0
__UpperCamelCase : List[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCamelCase)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = recopy_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
model.train()
if secondary_learner is not None:
secondary_learner.to(_lowerCamelCase)
secondary_learner.eval()
__UpperCamelCase : Union[str, Any] = []
__UpperCamelCase : Any = 0
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Any = []
# Compute the performance of the transformer model at the beginning
__UpperCamelCase : str = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
test_perps.append(_lowerCamelCase)
print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase)
for epoch in range(int(_lowerCamelCase)):
for step, example in enumerate(_lowerCamelCase):
torch.cuda.empty_cache()
__UpperCamelCase : Optional[Any] = random.randint(0 , example.size(2) - context_len - 1)
__UpperCamelCase : Optional[Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__UpperCamelCase : List[Any] = model(_lowerCamelCase , labels=_lowerCamelCase)
__UpperCamelCase : int = True
if secondary_learner is not None:
__UpperCamelCase : Optional[int] = secondary_learner.forward(
torch.tensor(_lowerCamelCase , dtype=torch.long , device=_lowerCamelCase).unsqueeze(0))[0].item()
observed_qs.append(float(_lowerCamelCase))
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
__UpperCamelCase : List[str] = -1
if predicted_q < threshold:
__UpperCamelCase : Optional[int] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu()))
__UpperCamelCase : Optional[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__UpperCamelCase : str = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__UpperCamelCase : List[Any] = compute_perplexity(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
test_perps.append(_lowerCamelCase)
print("Test perplexity, step" , _lowerCamelCase , ":" , _lowerCamelCase)
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , _lowerCamelCase)
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
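# Note on the filtering loop above: the secondary learner predicts the information
# gain IG(X) of each candidate context (`predicted_q`), and contexts below `threshold`
# are skipped. The threshold starts at the value passed in (1.0 by default) and is
# dropped to -1 once `global_step` reaches 10, so the filter is strict early in
# training and effectively pass-through afterwards.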
def _SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
# Required parameters
parser.add_argument(
"--data_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=_lowerCamelCase , type=_lowerCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=_lowerCamelCase , default=_lowerCamelCase , help="A seed for reproducible training.")
parser.add_argument(
"--context_len" , default=32 , type=_lowerCamelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=_lowerCamelCase , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=_lowerCamelCase , help="secondary model evaluation is triggered at eval_freq")
parser.add_argument("--max_steps" , default=1_000 , type=_lowerCamelCase , help="To calculate training epochs")
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=_lowerCamelCase , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=_lowerCamelCase , help="batch size of training data of language model(gpt2) ")
parser.add_argument(
"--eval_interval" , default=10 , type=_lowerCamelCase , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=_lowerCamelCase , help="The number of examples split to be used as objective_set/test_data")
parser.add_argument(
"--min_len" , default=1_026 , type=_lowerCamelCase , help="The minimum length of the article to be used as objective set")
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=_lowerCamelCase , help="number of epochs to train secondary learner")
parser.add_argument("--trim" , default=_lowerCamelCase , type=_lowerCamelCase , help="truncate the example if it exceeds context length")
parser.add_argument(
"--threshold" , default=1.0 , type=_lowerCamelCase , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=_lowerCamelCase , help="finetuned_model_name")
parser.add_argument(
"--recopy_model" , default=_lowerCamelCase , type=_lowerCamelCase , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=_lowerCamelCase , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
__UpperCamelCase : Any = joblib.load("data/IGF_values.jbl")
# Train secondary learner
__UpperCamelCase : Optional[Any] = training_secondary_learner(
_lowerCamelCase , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
__UpperCamelCase : int = GPTaLMHeadModel.from_pretrained("gpt2")
set_seed(42)
# Generate train and test data to train and evaluate gpt2 model
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1_026 , trim=_lowerCamelCase)
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=_lowerCamelCase , secondary_learner=_lowerCamelCase , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
    main()
| 94 | 0 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
a_ = logging.getLogger(__name__)
class UpperCAmelCase_ ( snake_case ):
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
__lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] )
__lowercase : Any = layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , )
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_ ) -> int:
super().__init__(UpperCamelCase_ )
__lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ )
self.init_weights()
__lowercase : str = 0
__lowercase : Optional[Any] = 0
__lowercase : Optional[int] = 0
__lowercase : int = 0
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
__lowercase : Tuple = threshold
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Optional[int] = patience
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Tuple = 0
__lowercase : Tuple = 0
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num
__lowercase : int = (
F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
elif input_ids is not None:
__lowercase : Tuple = input_ids.size()
elif inputs_embeds is not None:
__lowercase : List[Any] = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
__lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
__lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
__lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size()
__lowercase : Any = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
__lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
__lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ )
else:
__lowercase : Tuple = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
__lowercase : Optional[int] = self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
__lowercase : Union[str, Any] = embedding_output
if self.training:
__lowercase : List[Any] = []
for i in range(self.config.num_hidden_layers ):
__lowercase : str = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
__lowercase : int = self.pooler(UpperCamelCase_ )
__lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) )
res.append(UpperCamelCase_ )
elif self.patience == 0: # Use all layers for inference
__lowercase : int = self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
__lowercase : Optional[Any] = self.pooler(encoder_outputs[0] )
__lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )]
else:
__lowercase : Optional[int] = 0
__lowercase : Union[str, Any] = None
__lowercase : int = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
__lowercase : Tuple = self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
__lowercase : Dict = self.pooler(UpperCamelCase_ )
__lowercase : Optional[int] = output_layers[i](UpperCamelCase_ )
if regression:
__lowercase : Any = logits.detach()
if patient_result is not None:
__lowercase : List[str] = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
__lowercase : int = 0
else:
__lowercase : List[str] = logits.detach().argmax(dim=1 )
if patient_result is not None:
__lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ):
patient_counter += 1
else:
__lowercase : Tuple = 0
__lowercase : Union[str, Any] = logits
if patient_counter == self.patience:
break
__lowercase : Optional[int] = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
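    # How the early exit above works, in brief: with patience > 0 the loop runs layer
    # by layer and an internal classifier produces logits after each one.
    # `patient_counter` counts consecutive layers whose prediction agrees with the
    # previous layer (argmax for classification, closeness within
    # `regression_threshold` for regression); once it reaches `self.patience`, the
    # loop breaks and only the last logits are returned.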
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , snake_case , )
class UpperCAmelCase_ ( snake_case ):
def __init__( self , UpperCamelCase_ ) -> Optional[Any]:
super().__init__(UpperCamelCase_ )
__lowercase : List[Any] = config.num_labels
__lowercase : int = BertModelWithPabee(UpperCamelCase_ )
__lowercase : int = nn.Dropout(config.hidden_dropout_prob )
__lowercase : Union[str, Any] = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int:
__lowercase : Union[str, Any] = self.bert(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
__lowercase : List[str] = (logits[-1],)
if labels is not None:
__lowercase : Any = None
__lowercase : Optional[int] = 0
for ix, logits_item in enumerate(UpperCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
__lowercase : Any = MSELoss()
__lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
__lowercase : str = CrossEntropyLoss()
__lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
__lowercase : List[str] = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
__lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs
return outputs
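    # Training detail from the loop above: the loss of the i-th internal classifier is
    # weighted by (i + 1), so deeper classifiers contribute more, and the reported
    # loss is the weighted average total_loss / total_weights.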
| 76 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize the prompts and yield each one `n_copies` times, task by task."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generations in the batch are complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
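# Worked example of the truncation above, with EOF_STRINGS as defined at the top:
#   remove_last_block("    return x\nclass Foo:\n    pass")
#   -> "    return x"
# (the text from the last end-of-function marker on, including the marker itself,
# is dropped)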
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 30 | 0 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING`
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
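# Illustration of what `analyze_results` reports, on hypothetical toy inputs
# (the real inputs come from parsing a transformers __init__.py):
#   analyze_results({"none": ["A", "B"]}, {"none": ["A", "C"]})
#   -> ["Differences for base imports:",
#       "  C in TYPE_HINT but not in _import_structure.",
#       "  B in _import_structure but not in TYPE_HINT."]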
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
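# Path-to-module-name conversion, by example (hypothetical layout): a folder
# "models/bert" under PATH_TO_TRANSFORMERS becomes the submodule "models.bert",
# and a top-level file "trainer.py" becomes "trainer"; files nested deeper than
# one level are skipped because their parent folder is already collected.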
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 712 |
"""Hyperbolic tangent (tanh) activation implemented with NumPy."""
import numpy as np


def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implement tanh via the logistic-sigmoid identity tanh(x) = 2*sigmoid(2x) - 1.

    >>> tangent_hyperbolic(np.array([0.0]))
    array([0.])
    """
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
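# Quick self-check of the identity used above (a minimal sketch; assumes the
# function is named `tangent_hyperbolic` as defined here):
#   x = np.linspace(-3.0, 3.0, 7)
#   assert np.allclose(tangent_hyperbolic(x), np.tanh(x))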
| 340 | 0 |
"""simple docstring"""
A: List[str] = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ")", pop one operator and two operands,
            # apply the operator and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5: the single remaining operand is the value of the expression
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
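# Trace for the demo expression, following the rules above (illustrative):
#   (4 * 2)  -> 8 pushed    (rule 4: pop "*" and operands 4, 2)
#   (2 + 3)  -> 5 pushed
#   (8 * 5)  -> 40 pushed
#   (5 + 40) -> 45, the final peek (rule 5)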
| 160 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
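# `_LazyModule` swaps the module object in sys.modules so submodules are only
# imported on first attribute access. A minimal sketch of the same idea using
# plain PEP 562 module-level __getattr__ (illustrative, not the transformers
# implementation):
#
#   import importlib
#
#   def __getattr__(name):
#       for module, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module(f".{module}", __name__), name)
#       raise AttributeError(name)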
| 696 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """apply_ocr""" ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __lowerCAmelCase )
self.assertIsInstance(encoding.boxes , __lowerCAmelCase )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def _lowerCamelCase ( self ):
# with apply_OCR = True
UpperCamelCase__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
UpperCamelCase__ = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
UpperCamelCase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCAmelCase )
self.assertListEqual(encoding.boxes , __lowerCAmelCase )
# with apply_OCR = False
UpperCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase )
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
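# Minimal usage sketch for the processor under test (assumes Pillow and a local
# document image; the `words` and `boxes` fields are only populated when
# apply_ocr=True and Tesseract is installed):
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   encoding = processor(image, return_tensors="pt")  # pixel_values only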
| 548 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
def _lowerCamelCase ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 38015, """token_str""": """ grouped"""},
{"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 25506, """token_str""": """ accuser"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is grouped""",
"""score""": 2.1E-05,
"""token""": 38015,
"""token_str""": """ grouped""",
},
{
"""sequence""": """The largest city in France is accuser""",
"""score""": 2.1E-05,
"""token""": 25506,
"""token_str""": """ accuser""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""" )
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 35676, """token_str""": """ Maul"""},
{"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{
"""sequence""": """The largest city in France is Maul""",
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
},
{"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 16416, """token_str""": """ELS"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
{"""sequence""": """My name is Patrick""", """score""": 2.1E-05, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2941, """token_str""": """ Te"""},
{"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 13606, """token_str""": """ Clara"""},
] , )
UpperCamelCase__ = unmasker("""My name is <mask> <mask>""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase , decimals=6 ) , [
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is Maul<mask></s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""},
],
[
{
"""score""": 2.2E-05,
"""token""": 35676,
"""token_str""": """ Maul""",
"""sequence""": """<s>My name is<mask> Maul</s>""",
},
{"""score""": 2.2E-05, """token""": 16416, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""},
],
] , )
@require_torch_gpu
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""" )
# convert model to fp16
pipe.model.half()
UpperCamelCase__ = pipe("""Paris is the [MASK] of France.""" )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@slow
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""" )
self.run_large_test(__lowerCAmelCase )
@slow
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""" )
self.run_large_test(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = unmasker("""My name is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is John""", """score""": 0.008, """token""": 610, """token_str""": """ John"""},
{"""sequence""": """My name is Chris""", """score""": 0.007, """token""": 1573, """token_str""": """ Chris"""},
] , )
UpperCamelCase__ = unmasker("""The largest city in France is <mask>""" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{
"""sequence""": """The largest city in France is Paris""",
"""score""": 0.251,
"""token""": 2201,
"""token_str""": """ Paris""",
},
{
"""sequence""": """The largest city in France is Lyon""",
"""score""": 0.214,
"""token""": 12790,
"""token_str""": """ Lyon""",
},
] , )
UpperCamelCase__ = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""sequence""": """My name is Patrick""", """score""": 0.005, """token""": 3499, """token_str""": """ Patrick"""},
{"""sequence""": """My name is Clara""", """score""": 0.000, """token""": 13606, """token_str""": """ Clara"""},
{"""sequence""": """My name is Te""", """score""": 0.000, """token""": 2941, """token_str""": """ Te"""},
] , )
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""" )
UpperCamelCase__ = None
UpperCamelCase__ = None
self.run_pipeline_test(__lowerCAmelCase , [] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""" )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = [
f"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = fill_masker.tokenizer
UpperCamelCase__ = fill_masker.model
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
with self.assertRaises(__lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__lowerCAmelCase ):
fill_masker("""This is""" )
self.run_test_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_targets(__lowerCAmelCase , __lowerCAmelCase )
self.run_test_top_k_targets(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(__lowerCAmelCase , __lowerCAmelCase )
self.fill_mask_with_multiple_masks(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , targets=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Call argument
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __lowerCAmelCase )
UpperCamelCase__ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__lowerCAmelCase ) )
# Score equivalence
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""token_str"""] for top_mask in outputs]
UpperCamelCase__ = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ) == set(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__lowerCAmelCase )
UpperCamelCase__ = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , top_k=2 )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = tokenizer.get_vocab()
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# top_k=2, ntargets=3
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        UpperCamelCase__ = [el["token_str"] for el in sorted(UpperCamelCase__, key=lambda x: x["score"], reverse=True)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__lowerCAmelCase ).issubset(__lowerCAmelCase ):
UpperCamelCase__ = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__lowerCAmelCase ) , nested_simplify(__lowerCAmelCase ) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCamelCase__ = sorted(vocab.keys() )[:3]
UpperCamelCase__ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCamelCase__ = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=__lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(__lowerCAmelCase ) , 3 )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = FillMaskPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
UpperCamelCase__ = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__lowerCAmelCase , [
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
[
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
{"""sequence""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase ), """token""": ANY(__lowerCAmelCase ), """token_str""": ANY(__lowerCAmelCase )},
],
] , )
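# Output-shape note, reading off the multi-mask tests above (a sketch; assumes
# the tiny test checkpoint "sshleifer/tiny-distilroberta-base" is reachable):
#   unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
#   out = unmasker("My name is <mask> <mask>", top_k=2)
#   # len(out) == 2: one inner list per mask token, each with top_k candidate dicts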
| 548 | 1 |
"""Image processor with nearest-cluster color quantization (ImageGPT-style)."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase = logging.get_logger(__name__)
def squared_euclidean_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    # Pairwise squared distances: d[i, j] = |a_i|^2 - 2 a_i.b_j + |b_j|^2
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # Map each RGB pixel to the index of its nearest cluster color
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
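# Sanity check of the vectorized distance against an explicit double loop
# (a minimal sketch with small random inputs):
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _a, _b = _rng.random((4, 3)), _rng.random((5, 3))
    _brute = np.array([[np.sum((p - q) ** 2) for q in _b] for p in _a])
    assert np.allclose(squared_euclidean_distance(_a, _b), _brute)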
class ImageGPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Rescale pixel values from [0, 255] into [-1, 1]
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
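# Shape flow when do_color_quantize=True (reading off the code above): a batch
# of images (batch, H, W, 3) is mapped pixel-wise to nearest-cluster indices of
# shape (batch, H, W), then flattened to "input_ids" of shape (batch, H*W),
# i.e. the token sequence an image-GPT-style model consumes.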
| 467 |
"""Audio feature type: encode and decode audio examples for datasets."""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self : Any ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # To convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have the PCM bytes, use them directly instead of re-reading the file
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
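    # PCM handling above, in isolation (a minimal sketch): 16-bit little-endian
    # samples are scaled into [-1, 1] floats, then re-encoded as an in-memory WAV:
    #   raw = np.frombuffer(pcm_bytes, dtype=np.int16).astype(np.float32) / 32767
    #   buf = BytesIO(); sf.write(buf, raw, sampling_rate, format="wav")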
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None

            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 467 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = """▁"""
snake_case = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
snake_case = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1_024,
}
# fmt: off
snake_case = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
    """Construct an MBart-50 tokenizer based on SentencePiece, with fairseq-aligned token ids."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def _A ( self : str ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _A ( self : Optional[int] ):
return self._src_lang
@src_lang.setter
def _A ( self : List[str] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ):
SCREAMING_SNAKE_CASE : str = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Tuple = None
return state
def __setstate__( self : Union[str, Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
SCREAMING_SNAKE_CASE : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _A ( self : List[Any] ):
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _A ( self : Optional[Any] , UpperCAmelCase_ : str ):
return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
def _A ( self : str , UpperCAmelCase_ : str ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE : int = self.sp_model.PieceToId(UpperCAmelCase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _A ( self : List[str] , UpperCAmelCase_ : int ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Tuple ):
SCREAMING_SNAKE_CASE : Dict = []
SCREAMING_SNAKE_CASE : Optional[int] = ""
SCREAMING_SNAKE_CASE : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
SCREAMING_SNAKE_CASE : Any = True
SCREAMING_SNAKE_CASE : List[Any] = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase_ , "wb" ) as fi:
SCREAMING_SNAKE_CASE : List[Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_ )
return (out_vocab_file,)
def _A ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(UpperCAmelCase_ )) + ([0] * len(UpperCAmelCase_ )) + suffix_ones
def _A ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _A ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] , UpperCAmelCase_ : Optional[str] , **UpperCAmelCase_ : List[str] ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
SCREAMING_SNAKE_CASE : Dict = src_lang
SCREAMING_SNAKE_CASE : List[str] = self(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.convert_tokens_to_ids(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : str = tgt_lang_id
return inputs
def _A ( self : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str = "en_XX" , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : str = "ro_RO" , **UpperCAmelCase_ : Optional[Any] , ):
SCREAMING_SNAKE_CASE : Union[str, Any] = src_lang
SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : str ):
return self.set_src_lang_special_tokens(self.src_lang )
def _A ( self : int ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _A ( self : Optional[int] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : List[Any] = self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE : Dict = [self.cur_lang_code_id]
SCREAMING_SNAKE_CASE : Tuple = [self.eos_token_id]
def _A ( self : Union[str, Any] , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE : Optional[int] = [self.cur_lang_code_id]
SCREAMING_SNAKE_CASE : Tuple = [self.eos_token_id]
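
# Added usage sketch (not part of the excerpt): "facebook/mbart-large-50-many-to-many-mmt"
# is a real checkpoint shipping this vocabulary; the input sentence is illustrative.
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   # input_ids begin with the en_XX language-code id and end with </s>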
| 488 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Build a circuit with the given register sizes, measure qubit 0 into
    classical bit 0, and return the measurement counts from the simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
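
# Added illustration (sketch, same qiskit Aer API as above): apply a Hadamard
# gate before measuring, so the counts split roughly 50/50 between '0' and '1'.
def superposition_measure() -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(1, 1)
    circuit.h(0)  # put qubit 0 into an equal superposition
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)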
| 488 | 1 |
"""simple docstring"""
def solution(n: int = 1000) -> int:
    """Return the sum of all natural numbers below ``n`` that are multiples
    of 3 or 5 (Project Euler problem 1)."""
    result = 0
    for a in range(3, n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
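
# Added constant-time alternative (sketch): by inclusion-exclusion, sum the
# multiples of 3 and of 5 below n, then subtract the multiples of 15 counted
# twice, each via the arithmetic-series formula. Agrees with solution().
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of positive multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)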
| 93 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
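
# Added usage sketch (illustrative): `attribute_map` lets generic config names
# resolve to the CTRL-specific ones, e.g.
#
#   config = CTRLConfig(n_layer=2)
#   config.hidden_size        # 1280 -> aliased to config.n_embd
#   config.num_hidden_layers  # 2    -> aliased to config.n_layer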
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
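
# Added usage sketch mirroring the docstring example above (requires the
# `mauve-text` package and a featurization model download):
#
#   import datasets
#   mauve = datasets.load_metric("mauve")
#   out = mauve.compute(predictions=["hello there"], references=["general kenobi"])
#   print(out.mauve)  # scalar in (0, 1]; higher means the two text distributions are closer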
| 703 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"""XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaForCausalLM""",
"""XLMRobertaForMaskedLM""",
"""XLMRobertaForMultipleChoice""",
"""XLMRobertaForQuestionAnswering""",
"""XLMRobertaForSequenceClassification""",
"""XLMRobertaForTokenClassification""",
"""XLMRobertaModel""",
"""XLMRobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"""TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMRobertaForCausalLM""",
"""TFXLMRobertaForMaskedLM""",
"""TFXLMRobertaForMultipleChoice""",
"""TFXLMRobertaForQuestionAnswering""",
"""TFXLMRobertaForSequenceClassification""",
"""TFXLMRobertaForTokenClassification""",
"""TFXLMRobertaModel""",
"""TFXLMRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"""FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FlaxXLMRobertaForMaskedLM""",
"""FlaxXLMRobertaForCausalLM""",
"""FlaxXLMRobertaForMultipleChoice""",
"""FlaxXLMRobertaForQuestionAnswering""",
"""FlaxXLMRobertaForSequenceClassification""",
"""FlaxXLMRobertaForTokenClassification""",
"""FlaxXLMRobertaModel""",
"""FlaxXLMRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
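
# Added illustration of the lazy-import pattern used above (a stripped-down,
# hypothetical stand-in for transformers' _LazyModule, which does much more):
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the (absolute) module that defines it
        self._name_to_module = {
            exported: module
            for module, exported_names in import_structure.items()
            for exported in exported_names
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the actual import only happens on first attribute access
        module = importlib.import_module(self._name_to_module[attr])
        return getattr(module, attr)


# lazy = TinyLazyModule("lazy", {"math": ["sqrt"], "json": ["dumps"]})
# lazy.sqrt(9.0)  # imports math here -> 3.0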
| 351 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
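
# Added end-user sketch of what TextDatasetReader is tested for above
# ("my_file.txt" is a placeholder path); the public API goes through load_dataset:
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "my_file.txt"})
#   print(ds["train"].column_names)  # ["text"], one line of the file per row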
| 347 |
'''simple docstring'''
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
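
# Added usage sketch (assumes the SAM checkpoint "facebook/sam-vit-base",
# which backs this pipeline, and a local example image path):
#
#   from transformers import pipeline
#   generator = pipeline("mask-generation", model="facebook/sam-vit-base", device=0)
#   outputs = generator("example.jpg", points_per_batch=64)
#   print(len(outputs["masks"]), outputs["scores"][:3])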
| 347 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            attention_window=self.attention_window,
            **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]],
            axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)


def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices,
            1,
            inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_xla_generate_fast(self):
        pass

    def test_generate_with_headmasking(self):
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4


@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
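
# Added standalone illustration (sketch) of the global-attention-mask pattern
# used in the tests above: mark the first `k` positions of each sequence as
# global tokens and leave the rest local.
def make_global_attention_mask(batch_size: int, seq_length: int, k: int):
    base = tf.zeros((batch_size, seq_length), dtype=tf.int32)
    return tf.where(tf.range(seq_length)[None, :] < k, 1, base)


# make_global_attention_mask(1, 8, 2) -> [[1, 1, 0, 0, 0, 0, 0, 0]]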
| 367 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
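
# Added usage sketch (the benchmark utilities are real but deprecated in recent
# transformers releases; model name and sizes are illustrative):
#
#   from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
#   args = TensorFlowBenchmarkArguments(
#       models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[32]
#   )
#   results = TensorFlowBenchmark(args).run()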
| 367 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class lowerCAmelCase :
"""simple docstring"""
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
return None
class lowerCAmelCase :
"""simple docstring"""
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return None
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
__lowercase :List[str] = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase__ , '''tf''' , 12 , **UpperCamelCase__ )
@require_torch
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCamelCase__ , '''pt''' , 12 , **UpperCamelCase__ )
@require_torch
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
from transformers import BertModel
lowerCamelCase_ = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words''']
with NamedTemporaryFile(mode='''w+t''' ) as vocab_file:
vocab_file.write('''\n'''.join(UpperCamelCase__ ) )
vocab_file.flush()
lowerCamelCase_ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowerCamelCase_ = BertModel(BertConfig(vocab_size=len(UpperCamelCase__ ) ) )
model.save_pretrained(UpperCamelCase__ )
self._test_export(UpperCamelCase__ , '''pt''' , 12 , UpperCamelCase__ )
@require_tf
@slow
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase_ = self._test_export(UpperCamelCase__ , '''tf''' , 12 , **UpperCamelCase__ )
lowerCamelCase_ = quantize(Path(UpperCamelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
@require_torch
@slow
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowerCamelCase_ = self._test_export(UpperCamelCase__ , '''pt''' , 12 , **UpperCamelCase__ )
lowerCamelCase_ = quantize(UpperCamelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCamelCase__ ).stat().st_size:
self.fail('''Quantized model is bigger than initial ONNX model''' )
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix()) | 142 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs) | 142 | 1 |
def z_function(input_str: str) -> list[int]:
    """For every index i, compute the length of the longest substring starting
    at i that is also a prefix of ``input_str`` (the classic Z-array)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if the match at position i can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1

    return answer
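
# Added usage sketch (not part of the original file): "abr" occurs twice in
# "abracadabra", at indices 0 and 7, and the Z-array of "aaaa" counts how far
# each suffix matches the prefix.
assert find_pattern("abr", "abracadabra") == 2
assert z_function("aaaa") == [0, 3, 2, 1]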
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
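
# --- added usage sketch (not part of the original file) ---
# `attribute_map` aliases the generic config names onto CTRL-specific ones, so
# reading `hidden_size` goes through `n_embd`.
if __name__ == "__main__":
    config = CTRLConfig(n_embd=1_024)
    assert config.hidden_size == 1_024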
| 569 | 0 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 84 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2}) | 219 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] ,)
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
'input_ids': [[2, 10_8825, 1163, 15, 8_8010, 473, 1_5898, 157, 1_3672, 1857, 312, 8, 23_8021, 1163, 53, 1_3672, 1857, 312, 8, 5_3283, 18_2396, 8, 1_8566, 16, 3_6733, 4101, 8, 230, 24_4017, 12_2553, 7, 15, 13_2597, 4, 293, 1_2511, 7610, 4, 3414, 13_2597, 9, 4, 3_2361, 362, 4, 734, 2_8512, 3_2569, 18, 4, 3_2361, 2_6096, 1_4982, 73, 1_8715, 2_1433, 23_5261, 15, 492, 1_2427, 16, 53, 1_8715, 2_1433, 6_5454, 15, 2_3659, 563, 16, 278, 597, 2843, 595, 7931, 18_2396, 6_4186, 22, 886, 595, 13_2981, 53, 2_5540, 3449, 4_3982, 3_9901, 5951, 878, 330, 4, 2_7694, 8_0269, 312, 53, 6517, 1_1780, 611, 2_0408, 5], [2, 6, 13_2597, 67, 4_2897, 33, 592, 8, 16_3729, 2_5540, 361, 13_6997, 10_9514, 17_3230, 7, 501, 60, 10_2913, 196, 5631, 235, 6_3243, 473, 6, 23_1757, 74, 5277, 7905, 53, 3095, 3_7317, 22, 454, 18_3874, 5], [2, 268, 3_1298, 4_6530, 6, 13_2935, 4_3831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 719 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
_lowerCAmelCase : int = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 604 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_name = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 592 |
def solution(limit: int = 28123) -> int:
    """Project Euler 23: return the sum of all positive integers that cannot be
    written as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
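
    # Added sanity-check sketch (not part of the original file): the smallest sum
    # of two abundant numbers is 24 (= 12 + 12), so with limit=23 every integer
    # contributes and the result is 1 + 2 + ... + 23 == 276.
    assert solution(23) == 276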
| 592 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 702 |
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.

    From tensorflow_datasets

    Built-in in functools from Python 3.8.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; false values are `n`, `no`, `f`, `false`, `off`, and `0`.
    Raises ValueError if `val` is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, jax tensor or `np.ndarray`.
    """
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like
    a tuple) or strings (like a dictionary) that will ignore the `None` attributes.
    """

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v
    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        return tuple(self[k] for k in self.keys())
class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )
class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
    an IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
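
# Added usage sketch (not part of the original file): ExplicitEnum subclasses can
# be looked up by value, and a bad value raises the explicit error message above,
# e.g. PaddingStrategy("foo") -> ValueError("foo is not a valid PaddingStrategy, ...").
assert PaddingStrategy("longest") is PaddingStrategy.LONGEST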
def can_return_loss(model_class):
    """Check if a given model can return loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
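
# Added usage sketch (not part of the original file): nested keys are joined with
# the delimiter, e.g.
# flatten_dict({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}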
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
| 434 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
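# Hypothetical usage note (not part of the original file): with the lazy module
# installed in sys.modules, a consumer can write
#
#     from transformers.models.plbart import PLBartConfig
#
# and the heavy torch/sentencepiece imports only happen when the attribute is
# first resolved, not at package import time.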
| 75 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
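# Hypothetical example (not in the original script): an object present only in
# `_import_structure` is reported as a difference.
#
#     analyze_results({"none": ["A", "B"]}, {"none": ["A"]})
#     # -> ['Differences for base imports:',
#     #     '  B in _import_structure but not in TYPE_HINT.']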
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 548 | 0 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load flax checkpoints in a PyTorch model."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
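# Hypothetical sketch (not part of the original file): the (3, 2, 0, 1) transpose
# above maps a Flax Conv kernel laid out as (H, W, C_in, C_out) onto PyTorch's
# (C_out, C_in, H, W) convention.
#
#     k_flax = np.zeros((3, 3, 16, 32))           # H, W, C_in, C_out
#     k_pt = np.transpose(k_flax, (3, 2, 0, 1))   # -> (32, 16, 3, 3)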
| 608 |
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to an octal number."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"
def main() -> None:
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
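# Hypothetical cross-check (not in the original file): Python's built-in oct()
# agrees with decimal_to_octal for these values, e.g.
#
#     decimal_to_octal(216) == oct(216)   # both are "0o330"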
| 608 | 1 |
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
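# Hypothetical examples (not in the original file), all on 0b1101 (13):
#
#     set_bit(0b1101, 1)    == 0b1111  (15)
#     clear_bit(0b1101, 2)  == 0b1001  (9)
#     flip_bit(0b1101, 0)   == 0b1100  (12)
#     is_bit_set(0b1101, 3) is True
#     get_bit(0b1101, 1)    == 0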
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 352 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # CrossEntropyLoss returns the natural-log NLL, so the natural exp recovers perplexity.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 352 | 1 |
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Rewrites the edge weights so that every weight is distinct."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set union structure used by Boruvka's algorithm."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree of `graph`."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
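# Hypothetical usage (not in the original file): build a 4-vertex graph and
# extract a minimum spanning tree with Boruvka's algorithm.
if __name__ == "__main__":
    g = Graph.build(
        vertices=[0, 1, 2, 3],
        edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 4)],
    )
    print(Graph.boruvka_mst(g))  # keeps the edges of weight 1, 2 and 3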
| 546 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes `func` return its runtime in seconds instead of its result."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples: int = 100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples: int = 100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
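# Hypothetical usage (not in the original file): write 100 dummy rows to an Arrow
# file and load them back as a Dataset.
#
#     features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#     ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)
#     print(len(ds))  # 100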
| 546 | 1 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, the number of ways to roll it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
def solution() -> float:
    """Probability that Peter's nine four-sided dice beat Colin's six six-sided dice."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
| 306 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from `vert`, returning vertices in order of completion."""
    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component
def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: returns the list of strongly connected components."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
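# Hypothetical check (not in the original file): on test_graph_1 the cycle
# 0 -> 2 -> 1 -> 0 forms one component, while 3 and 4 are singletons.
#
#     strongly_connected_components(test_graph_1)   # e.g. [[0, 1, 2], [3], [4]]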
| 306 | 1 |
'''simple docstring'''
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 187 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """Configuration class to store the configuration of a `GPTNeoModel`."""

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable replacement for GPTNeo's block-length computation (no Python control flow)."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
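# Hypothetical check (not in the original file): custom_unfold should match
# torch.Tensor.unfold on an eager tensor.
#
#     import torch
#     t = torch.arange(10).reshape(1, 10)
#     assert torch.equal(custom_unfold(t, 1, 4, 2), t.unfold(1, 4, 2))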
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 187 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


# chars_incl = characters that must be in the password
# i = desired total length of the password
def alternative_password_generator(chars_incl: str, i: int) -> str:
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    # Strong passwords contain UPPERCASE, lowercase, numbers, and special characters.
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char
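# Hypothetical examples (not in the original file):
#
#     is_strong_password("Ab1!efgh")   # True: upper, lower, digit and punctuation
#     is_strong_password("abcdefgh")   # False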
def main():
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this password, You better save it.]")
if __name__ == "__main__":
main()
| 80 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig(PretrainedConfig):
    """Configuration class to store the configuration of an `FNetModel`."""

    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
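# Hypothetical usage (not in the original file):
#
#     config = FNetConfig(num_hidden_layers=6, hidden_size=384)
#     config.model_type   # "fnet"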
| 251 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images from random numpy arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 400 |
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Finds where `function` crosses zero in [a, b] using the bisection method."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
    print(bisection(f, 1, 1000))
import doctest
doctest.testmod()
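# For f(x) = x**3 - 2*x - 5 the real root is approximately 2.0945515, so the
# printed value should be close to that.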
| 400 | 1 |
'''simple docstring'''
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    """Returns number + 2 if number and number + 2 are both prime, -1 otherwise."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
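# Hypothetical examples (not in the original file):
#
#     twin_prime(3)   # 5, since 3 and 5 are both prime
#     twin_prime(4)   # -1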
| 370 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing degree order) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
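    # Both calls evaluate 5*x**2 + 9.3*x**3 + 7*x**4 at x = 10 and print 79800.0.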
| 676 | 0 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir('''fixtures/dummy-config.json''')


class AutoConfigTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_module_spec(self):
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto'''))

    def test_config_from_model_shortcut(self):
        config = AutoConfig.from_pretrained('''bert-base-uncased''')
        self.assertIsInstance(config, BertConfig)

    def test_config_model_type_from_local_file(self):
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_model_type_from_model_identifier(self):
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)

    def test_config_for_model_str(self):
        config = AutoConfig.for_model('''roberta''')
        self.assertIsInstance(config, RobertaConfig)

    def test_pattern_matching_fallback(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir, '''fake-roberta''')
            os.makedirs(folder, exist_ok=True)
            with open(os.path.join(folder, '''config.json'''), '''w''') as f:
                f.write(json.dumps({}))
            config = AutoConfig.from_pretrained(folder)
            self.assertEqual(type(config), RobertaConfig)

    def test_new_config_registration(self):
        try:
            AutoConfig.register('''custom''', CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('''model''', CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register('''bert''', BertConfig)

            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir)
                new_config = AutoConfig.from_pretrained(tmp_dir)
                self.assertIsInstance(new_config, CustomConfig)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''bert-base is not a local folder and is not a valid model identifier'''):
            _ = AutoConfig.from_pretrained('''bert-base''')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
            _ = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''')

    def test_configuration_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''', ):
            _ = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''')

    def test_from_pretrained_dynamic_config(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False)

        config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True)
        self.assertEqual(config.__class__.__name__, '''NewModelConfig''')

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            reloaded_config = AutoConfig.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_config.__class__.__name__, '''NewModelConfig''')

    def test_from_pretrained_dynamic_config_conflict(self):
        class NewModelConfigLocal(BertConfig):
            model_type = """new-model"""

        try:
            AutoConfig.register('''new-model''', NewModelConfigLocal)
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''')
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''')
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=False)
            self.assertEqual(config.__class__.__name__, '''NewModelConfigLocal''')
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''', trust_remote_code=True)
            self.assertEqual(config.__class__.__name__, '''NewModelConfig''')
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 417 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of an ESM model (and, for
    folding models, of the ESMFold head).
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    """Return the default ESM-2 vocabulary."""
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
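# Hedged usage sketch (added for illustration; the vocab_size is arbitrary):
# constructing a folding-model config fills in the default
# EsmFoldConfig -> TrunkConfig -> StructureModuleConfig chain.
if __name__ == "__main__":
    demo_config = EsmConfig(vocab_size=33, is_folding_model=True)
    assert isinstance(demo_config.esmfold_config, EsmFoldConfig)
    assert demo_config.to_dict()["esmfold_config"]["trunk"]["num_blocks"] == 48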
| 417 | 1 |
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['''model_config'''])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('''<ent>''', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('''<ent2>''', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names['''entity_vocab_file''']), '''w''') as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["""embeddings.word_embeddings.weight"""]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['''@'''])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(['''#'''])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F"""encoder.layer.{layer_index}.attention.self."""
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["""entity_embeddings.entity_embeddings.weight"""]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["""[MASK]"""]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(F"""Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids""")
    if not (all(key.startswith('''entity_predictions''') or key.startswith('''lm_head''') for key in unexpected_keys)):
        raise ValueError(
            '''Unexpected keys'''
            F""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}""")

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task='''entity_classification''')

    text = (
        """Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"""
        """ new world number one avoid a humiliating second- round exit at Wimbledon ."""
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors='''pt''')

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]])
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
            F""" {expected_shape}""")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, '''r''', encoding='''utf-8''') as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split('''\t''')
            entity_vocab[title] = index
    return entity_vocab
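def _demo_load_entity_vocab():
    # Hedged sketch (added; this helper is an illustration, not part of the
    # conversion script): shows the tab-separated "<title>\t<count>" format
    # that `load_entity_vocab` expects, one entity per line.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as tmp:
        tmp.write("Tokyo\t100\nParis\t42\n")
    assert load_entity_vocab(tmp.name) == {"Tokyo": 0, "Paris": 1}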
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 152 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 581 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """
    CTRL tokenizer: byte-pair encoding applied to whitespace-split tokens.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string by splitting on whitespace and applying BPE."""
        split_tokens = []

        words = re.findall(r'\S+\n?', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
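# Hedged usage sketch (added for illustration): `get_pairs` is the only
# helper above that runs without vocab/merges files on disk.
if __name__ == "__main__":
    assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}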
| 192 | """simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
CORRECT_DICT = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_CORRECT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
README_CORRECT_FOUR_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
CORRECT_DICT_FOUR_LEVEL = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
README_EMPTY_YAML = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_EMPTY_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
README_NO_YAML = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_NO_YAML = (
    "The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
README_INCORRECT_YAML = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_INCORRECT_YAML = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
README_MISSING_TEXT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_TEXT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
README_NONE_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
EXPECTED_ERROR_README_NONE_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
README_MISSING_SUBSECTION = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_SUBSECTION = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
README_MISSING_CONTENT = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
EXPECTED_ERROR_README_MISSING_CONTENT = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
README_MISSING_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
README_MULTIPLE_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
README_WRONG_FIRST_LEVEL = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
README_EMPTY = ""
EXPECTED_ERROR_README_EMPTY = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
README_MULTIPLE_SAME_HEADING_1 = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
| 192 | 1 |
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 507 | '''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use OwlViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 396 | 0 |
"""simple docstring"""
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in `n`."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
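def _demo_solution():
    # Hedged sketch (added for illustration): with exactly thirteen digits
    # there is a single window, and it contains '0', so the product is zero.
    assert solution("1234567890123") == 0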
if __name__ == "__main__":
print(f'{solution() = }')
| 309 |
"""simple docstring"""
import operator as op
a__ : Optional[int] = """scaler.pt"""
a__ : Dict = """pytorch_model"""
a__ : List[Any] = """random_states"""
a__ : Union[str, Any] = """optimizer"""
a__ : Tuple = """scheduler"""
a__ : Any = """pytorch_model.bin"""
a__ : int = """pytorch_model.bin.index.json"""
a__ : Union[str, Any] = """model.safetensors"""
a__ : Optional[int] = """model.safetensors.index.json"""
a__ : str = """1.10.2"""
a__ : int = """py38"""
a__ : Any = """4.17.0"""
a__ : List[str] = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
a__ : str = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
a__ : Optional[int] = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
a__ : int = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
a__ : int = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
a__ : int = """2.0.1"""
a__ : Optional[Any] = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
a__ : int = ["""default""", """reduce-overhead""", """max-autotune"""]
a__ : Optional[Any] = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
a__ : Any = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
a__ : List[str] = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
a__ : Dict = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 309 | 1 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    """Shortest path on a grid of 1s (walkable) and 0s (blocked) via a heap."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
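def _demo_shortest_path():
    # Hedged usage sketch (added for illustration): on a fully walkable 2x2
    # grid (1 = passable here), a single diagonal move reaches the goal.
    demo_grid = np.array([[1, 1], [1, 1]])
    dist, path = dijkstra(demo_grid, (0, 0), (1, 1), allow_diagonal=True)
    assert dist == 1.0 and path == [(0, 0), (1, 1)]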
if __name__ == "__main__":
import doctest
doctest.testmod() | 162 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate per-process waiting times under shortest-remaining-time-first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Turnaround time = burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time: list[int], turn_around_time: list[int], no_of_processes: int) -> None:
    """Print the average waiting and turnaround times."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_a: str = int(input())
_a: List[str] = [0] * no_of_processes
_a: Dict = [0] * no_of_processes
_a: Optional[int] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_a , _a: List[str] = map(int, input().split())
_a: List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_a: Optional[int] = burst_time
_a: List[Any] = no_of_processes
_a: List[Any] = waiting_time
_a: List[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_a: Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs) | 162 | 1 |
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, '''index.json''')
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, F'''{key}.dat''')
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, '''weight''', tmp_dir, {})
                weight_file = os.path.join(tmp_dir, '''weight.dat''')
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {'''weight''': {'''shape''': [2, 3], '''dtype''': str(dtype).split('''.''')[1]}})

                new_weight = load_offloaded_weight(weight_file, index['''weight'''])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
        disk_part = {k: v for k, v in state_dict.items() if '''linear2''' in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if '''weight''' in k}
        disk_part = {k: v for k, v in state_dict.items() if '''weight''' not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
        extracted = extract_submodules_state_dict(state_dict, ['''a.1''', '''a.2'''])
        self.assertDictEqual(extracted, {'''a.1''': 0, '''a.2''': 2})

        state_dict = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
        extracted = extract_submodules_state_dict(state_dict, ['''a.1''', '''a.2'''])
        self.assertDictEqual(extracted, {'''a.1.a''': 0, '''a.2.a''': 2}) | 280 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file: str, output_md_file: str) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown table."""
    with open(input_json_file, encoding='''utf-8''') as f:
        results = json.load(f)

    output_md = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' ''']

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split('''/''')[-1]
        output_md.append(f'''### Benchmark: {benchmark_file_name}''')

        title = '''| metric |'''
        lines = '''|--------|'''
        value = '''| new / old (diff) |'''
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['''new''']
            old_val = metric_vals.get('''old''', None)
            dif_val = metric_vals.get('''diff''', None)

            val_str = f''' {new_val:f}''' if isinstance(new_val, (int, float)) else '''None'''
            if old_val is not None:
                val_str += f''' / {old_val:f}''' if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f''' ({dif_val:f})''' if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append('''</details>''')

    with open(output_md_file, '''w''', encoding='''utf-8''') as f:
        f.writelines('''\n'''.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file) | 280 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Shift the brightness of a PIL Image by adding `level` to every pixel."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
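def _demo_change_brightness():
    # Hedged sketch (added for illustration; needs no image file): darkening
    # a solid-gray image by 50 levels shifts every pixel from 128 down to 78.
    gray = Image.new("L", (2, 2), color=128)
    assert change_brightness(gray, -50).getpixel((0, 0)) == 78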
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save('image_data/lena_brightness.png', format='png')
| 3 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in O(n).
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
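def _demo_palindromic_string():
    # Hedged sketch (added for illustration): all of "abababa" is its own
    # longest palindromic substring, while "cbbd" yields "bb".
    assert palindromic_string("abababa") == "abababa"
    assert palindromic_string("cbbd") == "bb"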
if __name__ == "__main__":
import doctest
doctest.testmod() | 561 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
UpperCamelCase_ = RobertaForMaskedLM.from_pretrained(args.model_name)
UpperCamelCase_ = """roberta"""
elif args.model_type == "gpt2":
UpperCamelCase_ = GPTaLMHeadModel.from_pretrained(args.model_name)
UpperCamelCase_ = """transformer"""
UpperCamelCase_ = model.state_dict()
UpperCamelCase_ = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
UpperCamelCase_ = state_dict[f'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
UpperCamelCase_ = f'''{prefix}.embeddings.{w}.weight'''
UpperCamelCase_ = state_dict[param_name]
for w in ["weight", "bias"]:
UpperCamelCase_ = f'''{prefix}.embeddings.LayerNorm.{w}'''
UpperCamelCase_ = state_dict[param_name]
# Transformer Blocks #
UpperCamelCase_ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
]
UpperCamelCase_ = state_dict[f'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
UpperCamelCase_ = state_dict[f'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''lm_head.dense.{w}''']
UpperCamelCase_ = state_dict[f'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
UpperCamelCase_ = state_dict[f'''{prefix}.ln_f.{w}''']
UpperCamelCase_ = state_dict["""lm_head.weight"""]
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
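
    # Illustration of the layer mapping performed above -- a minimal sketch (not part of
    # the original script): the six selected teacher layers are re-indexed consecutively
    # in the student:
    #   for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    #       print(f"teacher layer {teacher_idx} -> student layer {std_idx}")
    # which maps teacher layers 0, 2, 4, 7, 9, 11 onto student layers 0..5.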
| 88 |
'''simple docstring'''
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """
    Try to find a path from the top-left to the bottom-right cell of ``maze``
    (0 = open cell, 1 = blocked) and print it if one exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first helper: extend the path at (i, j), backtracking on dead ends."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
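
# Usage sketch (illustrative; this sample maze is an assumption, not from the original
# file). 0 marks an open cell and 1 a blocked one; the printed solution marks the path:
#   maze = [[0, 1, 0],
#           [0, 0, 0],
#           [1, 0, 0]]
#   solve_maze(maze)  # prints the path matrix and returns True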
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 1 |
'''simple docstring'''
def solution() -> str:
    """Find the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
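
# A memory-friendlier variant -- a minimal sketch (an illustrative alternative, not the
# original solution): only the last ten digits matter, so the sum can be taken modulo
# 10**10 using three-argument pow().
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1001)) % modulus
    return str(total).zfill(10)  # zero-pad in case the ten-digit tail starts with 0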
if __name__ == "__main__":
print(solution())
| 384 | from __future__ import annotations
import math
A_ = "2020.9.26"
A_ = "xcodz-dot, cclaus, dhruvmanila"
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> tuple[float, float]:
"""simple docstring"""
if not all(isinstance(UpperCAmelCase, (float, int) ) for val in locals().values() ):
lowercase = f'Input values must either be float or int: {list(locals().values() )}'
raise TypeError(UpperCAmelCase )
lowercase = ((x * distance) / (z + distance)) * scale
lowercase = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> tuple[float, float, float]:
"""simple docstring"""
if not isinstance(UpperCAmelCase, UpperCAmelCase ):
raise TypeError('''Axis must be a str''' )
lowercase = locals()
del input_variables["axis"]
if not all(isinstance(UpperCAmelCase, (float, int) ) for val in input_variables.values() ):
lowercase = (
'''Input values except axis must either be float or int: '''
f'{list(input_variables.values() )}'
)
raise TypeError(UpperCAmelCase )
lowercase = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
lowercase = x * math.cos(UpperCAmelCase ) - y * math.sin(UpperCAmelCase )
lowercase = y * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
lowercase = z
elif axis == "x":
lowercase = y * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
lowercase = z * math.cos(UpperCAmelCase ) + y * math.sin(UpperCAmelCase )
lowercase = x
elif axis == "y":
lowercase = x * math.cos(UpperCAmelCase ) - z * math.sin(UpperCAmelCase )
lowercase = z * math.cos(UpperCAmelCase ) + x * math.sin(UpperCAmelCase )
lowercase = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }")
print(F"{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }")
| 604 | 0 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=False,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=False,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 127 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
| 127 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether ``cp`` is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
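
# Illustration of add_sub_symbol (a sketch; the toy tokens below are illustrative):
# given bert tokens ["天", "气", "好"] and the LTP whole word "天气", the continuation
# of the whole word gets the "##" prefix used for whole-word masking:
#   add_sub_symbol(["天", "气", "好"], {"天气"})  ->  ["天", "##气", "好"]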
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 326 |
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal):
    """Take a positive or negative integer (or an integer-valued float) and return
    its hexadecimal representation as a string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
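
# Cross-check against the built-in hex() -- a short illustrative sketch:
#   decimal_to_hexadecimal(5)    == hex(5)    == "0x5"
#   decimal_to_hexadecimal(-256) == hex(-256) == "-0x100"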
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer

    pass
| 676 |
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
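
# Quick illustration (the values follow from the definition above): the automorphic
# numbers below 100 are 0, 1, 5, 6, 25 and 76 -- e.g. 76**2 == 5776, which ends in 76.
#   [n for n in range(100) if is_automorphic_number(n)]  ->  [0, 1, 5, 6, 25, 76]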
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 1 |
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}


class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            mask_token=mask_token,
            cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
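
# Minimal usage sketch (illustrative only; requires network access to the Hugging Face
# Hub, and the exact ids depend on the checkpoint's vocabulary):
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   tok("Paris is the capital of France.")["input_ids"]  # [CLS] ... [SEP] around the pieces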
| 461 |
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return every possible combination of substrings from ``word_bank`` that
    concatenates to ``target``.
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 461 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 708 |
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """
    Strand sort: repeatedly pull an increasing "strand" out of ``arr`` and merge
    it into the solution list.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
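
# Trace of strand_sort([4, 3, 5, 1, 2]) -- a sketch of one run (the steps follow from
# the code above): the first pass pulls the increasing strand [4, 5], leaving [3, 1, 2];
# that strand seeds the solution, and recursion merges the next strands [3] and [1, 2]
# into it, yielding [1, 2, 3, 4, 5].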
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 455 | 0 |
def get_highest_set_bit_position(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position
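
# For positive integers this matches the built-in bit_length() -- a short check:
#   get_highest_set_bit_position(8)  ->  4   # 0b1000; same as (8).bit_length()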
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 | import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
| 221 | 0 |
"""simple docstring"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use the large model configuration.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA (visual question answering) checkpoint.")
    args = parser.parse_args()

    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
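
# A worked sketch of the key renaming above for a single (hypothetical)
# encoder parameter; each step applies one of the mappings defined in
# rename_and_convert_flax_params:
#   ("target", "encoder", "layers_3", "mlp", "wi", "kernel")
#   -> "encoder.layers_3.mlp.wi.kernel"           (drop "target", join on ".")
#   -> "encoder.layers_3.mlp.wi.weight"           ("kernel" -> "weight")
#   -> "encoder.layer.3.mlp.wi.weight"            (layers_(\d+) -> layer.\1)
#   -> "encoder.encoder.layer.3.mlp.wi.weight"    ("encoder" -> "encoder.encoder")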
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
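
# A short usage sketch of the lazy pattern from the caller's side: nothing in
# modeling_dpt is imported until an attribute is actually touched, e.g.
#   import transformers
#   config = transformers.DPTConfig()   # first touch triggers the real import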
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative distance (0..1) until the Mandelbrot iteration of
    the point (x, y) diverges (1 means it never diverged within max_step)."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white for points outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, HSV color-coded otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
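    # A quick sketch of rendering small thumbnails with both coloring modes
    # (tiny dimensions keep the run fast; parameters are the ones defined above):
    # thumb_color = get_image(image_width=80, image_height=60)
    # thumb_bw = get_image(image_width=80, image_height=60, use_distance_color_coding=False)
    # thumb_color.save("mandelbrot_color_thumb.png")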
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(F"{solution() = }")
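    # A minimal sketch (not in the original) of the bucket-counting pattern
    # used above: tally values with a defaultdict, then count the buckets
    # whose tally falls in a range.
    buckets: defaultdict = defaultdict(int)
    for value in [8, 8, 12, 16, 16, 16]:
        buckets[value] += 1
    assert sum(1 for n in buckets.values() if 1 <= n <= 2) == 2  # values 8 and 12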
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # Seed the generator so the tool output is reproducible.
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
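    # A minimal cross-check (not in the original) against a one-liner digit
    # sum: 2**15 = 32768, whose digits sum to 26.
    assert solution(15) == sum(int(d) for d in str(2**15)) == 26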
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"

    return soup.find("div", class_=class_).find("span").text
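

def stock_price_safe(symbol: str = "AAPL") -> str:
    """Hedged variant (not in the original): Yahoo's markup changes often, so
    guard against soup.find() returning None instead of letting it raise."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    span = div.find("span") if div else None
    return span.text if span else "N/A"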
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
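
# A short usage sketch (command names assume the installed accelerate CLI):
#   $ accelerate config                          # interactive prompts
#   $ accelerate config --config_file my.yaml    # explicit save location
# Programmatically, the same flow is roughly:
#   parser = config_command_parser()
#   config_command(parser.parse_args(["--config_file", "my.yaml"]))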
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text in `filename` between `start_prompt` and `end_prompt`,
    along with its start/end indices and the full list of lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a camel-cased `identifier` into words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Generate an up-to-date model table from the content of the auto modules."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


def check_model_table(overwrite=False):
    """Check the model table in index.md is consistent with the state of the lib and maybe `overwrite`."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
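    # A tiny sketch (not part of the original script) of the cell-centering
    # helper defined above: emoji cells are counted as width 2.
    assert _center_text("✅", 10) == "    ✅    "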
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any

    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main():
print("\n2 in octal is:" )
print(decimal_to_octal(2 ) ) # = 2
print("\n8 in octal is:" )
print(decimal_to_octal(8 ) ) # = 10
print("\n65 in octal is:" )
print(decimal_to_octal(65 ) ) # = 101
print("\n216 in octal is:" )
print(decimal_to_octal(216 ) ) # = 330
print("\n512 in octal is:" )
print(decimal_to_octal(512 ) ) # = 1000
print("\n" )
if __name__ == "__main__":
main()
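    # Quick cross-check (not in the original) against Python's built-in oct():
    assert decimal_to_octal(216) == oct(216) == "0o330"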
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
"""hidden_size""": """embed_dim""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
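
# A short usage sketch (defaults are the ones defined above):
#   config = Swin2SRConfig(upscale=4)
#   config.num_layers  # 6, derived from len(depths)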
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
    print(solution())
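    # A memory-friendlier sketch (not in the original) using modular
    # exponentiation: keep only the last ten digits at each step instead of
    # the full (huge) integers.
    mod = 10**10
    assert solution() == str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)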
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that assigning `n` at (row, column) keeps the grid valid: the
    value must not already appear in the same row, column, or 3x3 sub-grid."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find a vacant cell (marked 0), or return None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Recursive backtracking solver: try digits 1-9 in the first empty cell."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
        print("Cannot find a solution.")
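    # A tiny executable sketch (not in the original) of the safety check on
    # the first example grid: 3 already sits at (0, 0), so placing another 3
    # anywhere else in row 0 is rejected.
    assert not is_safe(initial_grid, 0, 1, 3)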
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by post(); post_reply() refuses to run before a post was made.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3_600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print("Sending the following payload" )
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
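    # A minimal sketch (not in the original) of the stats parser defined
    # above on an illustrative pytest-style summary line:
    assert handle_test_results("= 3 failed, 120 passed in 00:05:12 =") == (3, 120, "00:05:12")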
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1_000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
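

if __name__ == "__main__":
    # A tiny sketch (not part of the original tests) of the bbox normalization
    # done in prepare_config_and_inputs above: coordinates are swapped so that
    # x0 <= x1 and y0 <= y1 always hold.
    box = [5, 8, 2, 3]
    if box[2] < box[0]:
        box[0], box[2] = box[2], box[0]
    if box[3] < box[1]:
        box[1], box[3] = box[3], box[1]
    assert box == [2, 3, 5, 8]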
'''simple docstring'''
import os
def solution() -> int:
    """Find the greatest product of four adjacent numbers, in any direction,
    in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
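    # A small sketch (toy data, not the real grid) of the anti-diagonal scan:
    # "diagonal 2" reads down-left, hence the j - k offsets above.
    toy = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    assert toy[0][3] * toy[1][2] * toy[2][1] * toy[3][0] == 3640  # 4 * 7 * 10 * 13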
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
lowerCAmelCase : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS)
CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
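
# Standalone sanity check (illustrative, not part of the original script): the
# checkpoint regex extracts (name, link) pairs from a typical docstring line.
assert _re_checkpoint.findall(
    "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]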
| 630 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
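
# Usage sketch (assumes this file sits at transformers/models/x_clip/__init__.py):
# with _LazyModule in place, importing a config symbol stays cheap, while the
# first access to a modeling symbol triggers the real torch-backed import:
#   from transformers.models.x_clip import XCLIPConfig  # no torch import yet
#   from transformers.models.x_clip import XCLIPModel   # loads modeling_x_clip lazily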
| 630 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class _UpperCamelCase(BaseImageProcessor):
    model_input_names = ['''pixel_values''']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = size if size is not None else {'shortest_edge': 2_5_6}
__a : str = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : int = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__a : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
__a : Dict = do_resize
__a : Union[str, Any] = size
__a : Any = resample
__a : List[Any] = do_center_crop
__a : Any = crop_size
__a : Tuple = do_rescale
__a : Optional[int] = rescale_factor
__a : Optional[int] = do_normalize
__a : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[Any] , ):
'''simple docstring'''
__a : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__a : int = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE__ )
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ):
'''simple docstring'''
__a : str = get_size_dict(SCREAMING_SNAKE_CASE__ )
return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Union[float, List[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Any , ):
'''simple docstring'''
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : ImageInput , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = None , SCREAMING_SNAKE_CASE__ : bool = None , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[float] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ : Tuple , ):
'''simple docstring'''
__a : Tuple = do_resize if do_resize is not None else self.do_resize
__a : Optional[int] = size if size is not None else self.size
__a : int = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
__a : Dict = resample if resample is not None else self.resample
__a : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__a : Optional[int] = crop_size if crop_size is not None else self.crop_size
__a : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ )
__a : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__a : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
__a : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
__a : Optional[Any] = image_mean if image_mean is not None else self.image_mean
__a : List[Any] = image_std if image_std is not None else self.image_std
__a : int = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a : Any = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
__a : Any = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_center_crop:
__a : str = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
__a : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
__a : Any = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
__a : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
__a : int = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
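
# Self-contained sketch (illustrative, not part of the processor API): the
# rescale -> normalize arithmetic applied per image, with the 0.5 mean/std
# values matching the IMAGENET_STANDARD defaults assumed above.
def _rescale_normalize_demo() -> np.ndarray:
    img = np.full((2, 2, 3), 255, dtype=np.float32)
    img = img * (1 / 255)  # rescale step -> all ones
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.5, 0.5, 0.5])
    return (img - mean) / std  # normalize step -> all ones again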
| 47 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.0_1),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def __A ( cls : str ) -> List[Any]:
__lowerCamelCase = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __A ( self : int ) -> List[str]:
__lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE__ , repo_id='''test-config''' , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Optional[Any] ) -> Any:
__lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE__ , repo_id='''valid_org/test-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Optional[Any] ) -> Dict:
CustomConfig.register_for_auto_class()
__lowerCamelCase = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__lowerCamelCase = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> Tuple:
__lowerCamelCase = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCamelCase = c.n_embd + 1 # int
__lowerCamelCase = c.resid_pdrop + 1.0 # float
__lowerCamelCase = not c.scale_attn_weights # bool
__lowerCamelCase = c.summary_type + '''foo''' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , c.n_embd , '''mismatch for key: n_embd''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' )
self.assertEqual(SCREAMING_SNAKE_CASE__ , c.summary_type , '''mismatch for key: summary_type''' )
def __A ( self : str ) -> Dict:
__lowerCamelCase = PretrainedConfig()
__lowerCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__lowerCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(SCREAMING_SNAKE_CASE__ )}.''' )
def __A ( self : Dict ) -> List[str]:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase = mock.Mock()
__lowerCamelCase = 5_00
__lowerCamelCase = {}
__lowerCamelCase = HTTPError
__lowerCamelCase = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE__ ) as mock_head:
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
mock_head.assert_called()
def __A ( self : List[str] ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __A ( self : List[Any] ) -> List[Any]:
__lowerCamelCase = AutoConfig.from_pretrained('''bert-base-cased''' )
__lowerCamelCase = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(SCREAMING_SNAKE_CASE__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase = ['''config.42.0.0.json''']
__lowerCamelCase = 7_68
configuration.save_pretrained(SCREAMING_SNAKE_CASE__ )
shutil.move(os.path.join(SCREAMING_SNAKE_CASE__ , '''config.4.0.0.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''config.42.0.0.json''' ) )
__lowerCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[Any] ) -> Tuple:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCamelCase = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__lowerCamelCase = '''v4.0.0'''
__lowerCamelCase , __lowerCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCamelCase = '''v3.0.0'''
__lowerCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
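
# Standalone sketch of the "update config from string" pattern exercised above,
# using the real transformers class name (the import at the top of this file
# spells it GPTaConfig due to garbling); `update_from_string` parses "key=value"
# pairs and casts each value to the type of the existing attribute:
#   from transformers import GPT2Config
#   c = GPT2Config()
#   c.update_from_string("n_embd=1024,resid_pdrop=0.2,scale_attn_weights=False")
#   assert c.n_embd == 1024 and c.scale_attn_weights is False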
| 298 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''''''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, '''w''', encoding='''utf-8''') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
UpperCamelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
UpperCamelCase__ : Tuple = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
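
# Example invocation (all paths are placeholders for a downloaded TF XLNet
# checkpoint, and the script filename is an assumption):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-12_H-768_A-12/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-12_H-768_A-12/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sts-b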
| 496 | '''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : str = '▁'
UpperCamelCase__ : Any = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
UpperCamelCase__ : Union[str, Any] = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
UpperCamelCase__ : Dict = {
'facebook/m2m100_418M': 1_024,
}
# fmt: off
UpperCamelCase__ : Optional[int] = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class _lowercase(PreTrainedTokenizer):
    '''Sentencepiece-based M2M100 tokenizer (upstream name: M2M100Tokenizer).'''

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_="<s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_="<pad>" ,lowerCamelCase_="<unk>" ,lowerCamelCase_="m2m100" ,lowerCamelCase_ = None ,lowerCamelCase_=8 ,**lowerCamelCase_ ,) -> None:
'''simple docstring'''
UpperCAmelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase__ : Dict = language_codes
UpperCAmelCase__ : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase__ : Union[str, Any] = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
UpperCAmelCase__ : Any = kwargs.get('''additional_special_tokens''' ,[] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowerCamelCase_ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowerCamelCase_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowerCamelCase_ ,tgt_lang=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,language_codes=lowerCamelCase_ ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=lowerCamelCase_ ,**lowerCamelCase_ ,)
UpperCAmelCase__ : Optional[int] = vocab_file
UpperCAmelCase__ : Optional[Any] = load_json(lowerCamelCase_ )
UpperCAmelCase__ : List[str] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase__ : List[Any] = spm_file
UpperCAmelCase__ : Any = load_spm(lowerCamelCase_ ,self.sp_model_kwargs )
UpperCAmelCase__ : int = len(self.encoder )
UpperCAmelCase__ : Optional[int] = {
self.get_lang_token(lowerCamelCase_ ): self.encoder_size + i for i, lang_code in enumerate(lowerCamelCase_ )
}
UpperCAmelCase__ : List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowerCamelCase_ )}
UpperCAmelCase__ : List[str] = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase__ : Optional[int] = src_lang if src_lang is not None else '''en'''
UpperCAmelCase__ : int = tgt_lang
UpperCAmelCase__ : int = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase__ : Optional[int] = num_madeup_words
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> None:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_ ,out_type=lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Optional[int]:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowerCamelCase_ ,self.encoder[self.unk_token] )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowerCamelCase_ ,self.unk_token )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : Any = []
UpperCAmelCase__ : str = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowerCamelCase_ ) + token
UpperCAmelCase__ : str = []
else:
current_sub_tokens.append(lowerCamelCase_ )
out_string += self.sp_model.decode(lowerCamelCase_ )
return out_string.strip()
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ,lowerCamelCase_ = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_ ,token_ids_a=lowerCamelCase_ ,already_has_special_tokens=lowerCamelCase_ )
UpperCAmelCase__ : Dict = [1] * len(self.prefix_tokens )
UpperCAmelCase__ : Optional[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCamelCase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowerCamelCase_ )) + ([0] * len(lowerCamelCase_ )) + suffix_ones
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Tuple = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.__dict__.copy()
UpperCAmelCase__ : str = None
return state
def __setstate__( self ,lowerCamelCase_ ) -> None:
'''simple docstring'''
UpperCAmelCase__ : int = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : int = load_spm(self.spm_file ,self.sp_model_kwargs )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Path(lowerCamelCase_ )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
UpperCAmelCase__ : Optional[int] = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
UpperCAmelCase__ : str = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder ,lowerCamelCase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,lowerCamelCase_ )
elif not os.path.isfile(self.spm_file ):
with open(lowerCamelCase_ ,'''wb''' ) as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
return (str(lowerCamelCase_ ), str(lowerCamelCase_ ))
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = "en" ,lowerCamelCase_ = None ,lowerCamelCase_ = "ro" ,**lowerCamelCase_ ,) -> BatchEncoding:
'''simple docstring'''
UpperCAmelCase__ : int = src_lang
UpperCAmelCase__ : Tuple = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
UpperCAmelCase__ : List[str] = src_lang
UpperCAmelCase__ : List[str] = self(lowerCamelCase_ ,add_special_tokens=lowerCamelCase_ ,**lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = self.get_lang_id(lowerCamelCase_ )
UpperCAmelCase__ : Dict = tgt_lang_id
return inputs
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> None:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_lang_token(lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = self.lang_token_to_id[lang_token]
UpperCAmelCase__ : Union[str, Any] = [self.cur_lang_id]
UpperCAmelCase__ : Dict = [self.eos_token_id]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> None:
'''simple docstring'''
UpperCAmelCase__ : Any = self.get_lang_token(lowerCamelCase_ )
UpperCAmelCase__ : Optional[int] = self.lang_token_to_id[lang_token]
UpperCAmelCase__ : Tuple = [self.cur_lang_id]
UpperCAmelCase__ : str = [self.eos_token_id]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def lowerCAmelCase__ ( self ,lowerCamelCase_ ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.get_lang_token(lowerCamelCase_ )
return self.lang_token_to_id[lang_token]
def __UpperCamelCase( _A : str , _A : Dict[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = sentencepiece.SentencePieceProcessor(**_A )
spm.Load(str(_A ) )
return spm
def __UpperCamelCase( _A : str ):
'''simple docstring'''
with open(_A , '''r''' ) as f:
return json.load(_A )
def __UpperCamelCase( _A : List[str] , _A : str ):
'''simple docstring'''
with open(_A , '''w''' ) as f:
json.dump(_A , _A , indent=2 )
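
# Hedged usage sketch (commented out; needs sentencepiece and network access).
# `M2M100Tokenizer` is the upstream name for the class defined above, and the
# checkpoint comes from the pretrained maps; `src_lang` makes the tokenizer
# prepend the matching `__en__` language token to the input ids:
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   ids = tok("Hello world")["input_ids"]  # ids[0] == tok.get_lang_id("en")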
| 496 | 1 |
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    """simple docstring"""
    _id = F"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = F"""class {class_name}("""
    test_regex = F"""{4 * ' '}def {test_name}("""
    line_begin_regex = F"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = F"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"""{spaces * ' '}{correct_line}""")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def main(correct, fail=None):
    """simple docstring"""
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__A = parser.parse_args()
main(args.correct_filename, args.fail_filename)
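
# Expected record format for --correct_filename (one semicolon-separated line
# per expected-value fix, matching the split in main() above):
#   tests/models/bert/test_modeling_bert.py;BertModelTest;test_inference;        expected_slice = torch.tensor([...])
# --fail_filename, when given, holds "file::Class::test" ids that restrict
# which records are applied.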
| 59 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : str = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[Any] = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
snake_case_ : Optional[int] = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
snake_case_ : Tuple = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class lowercase__(PreTrainedTokenizerFast):
    '''Fast tokenizer for SqueezeBERT (upstream name: SqueezeBertTokenizerFast).'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True , lowerCamelCase__="[UNK]" , lowerCamelCase__="[SEP]" , lowerCamelCase__="[PAD]" , lowerCamelCase__="[CLS]" , lowerCamelCase__="[MASK]" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ):
'''simple docstring'''
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , tokenize_chinese_chars=lowerCamelCase__ , strip_accents=lowerCamelCase__ , **lowerCamelCase__ , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCamelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCamelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCamelCase__ ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(lowerCamelCase__ , normalizer_state.pop('''type''' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**lowerCamelCase__ )
UpperCamelCase = do_lower_case
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__=None ):
'''simple docstring'''
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
UpperCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
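
# Layout sketch of the special-token helpers above (A and B stand for
# already-converted token-id sequences):
#   build_inputs_with_special_tokens(A)    -> [CLS] A [SEP]
#   build_inputs_with_special_tokens(A, B) -> [CLS] A [SEP] B [SEP]
#   create_token_type_ids_from_sequences   -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"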
| 212 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE_(TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self :Any, snake_case :str):
"""simple docstring"""
with open(snake_case, encoding='utf-8') as input_file:
_lowercase =re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
_lowercase =input_file.read()
_lowercase =regexp.search(snake_case)
return match
def UpperCamelCase__ ( self :Optional[Any], snake_case :str):
"""simple docstring"""
with open(snake_case, encoding='utf-8') as input_file:
_lowercase =re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
_lowercase =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
_lowercase =regexp.finditer(snake_case)
_lowercase =[match for match in matches if match is not None and match.group(1) is not None]
return matches[0] if matches else None
def UpperCamelCase__ ( self :List[str]):
"""simple docstring"""
_lowercase =Path('./datasets')
_lowercase =list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case)):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''')
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =Path('./datasets')
_lowercase =list(dataset_paths.absolute().glob('**/*.py'))
for dataset in dataset_files:
if self._no_print_statements(str(snake_case)):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
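
# Quick standalone check of the print-detector regex used above (illustrative):
# commented-out and string-embedded print(...) calls fall into the non-capturing
# alternatives, so only a bare call populates group 1.
_demo_regexp = re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
assert sum(1 for m in _demo_regexp.finditer("x = 1\nprint(x)\n") if m.group(1)) == 1
assert sum(1 for m in _demo_regexp.finditer("# print(x)\n") if m.group(1)) == 0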
| 557 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid integers p**q * q**p (p < q prime) not exceeding base**degree."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
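
# Worked check of the log inequality driving the two-pointer sweep above:
# log2(p**q * q**p) = q*log2(p) + p*log2(q), so for p = 2, q = 3 the left side
# is about 6.17, far below 800800 * log2(800800) ~= 1.57e7, and the pair counts.
assert 3 * log2(2) + 2 * log2(3) < 800_800 * log2(800_800)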
| 557 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> Any:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__UpperCamelCase ) )
def A__ ( self ) -> Any:
__lowerCAmelCase = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__UpperCamelCase ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__UpperCamelCase ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> Optional[int]:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__lowerCAmelCase = """fp16"""
self.assertFalse(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> int:
# pass variant but use the non-variant filenames
__lowerCAmelCase = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertTrue(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
__lowerCAmelCase = """fp16"""
self.assertFalse(is_safetensors_compatible(__UpperCamelCase , variant=__UpperCamelCase ) )
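
# Behavior sketch distilled from the cases above (illustrative, not the
# implementation): compatibility holds when every `.bin` weight file has a
# `.safetensors` sibling for the same component, with an optional `.fp16`
# variant infix, e.g.
#   ["unet/diffusion_pytorch_model.bin",
#    "unet/diffusion_pytorch_model.safetensors"]  -> compatible
#   ["unet/diffusion_pytorch_model.bin"]          -> not compatible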
| 465 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    '''Read the given file as bytes and return them as one long bit string.'''
    result = ''
    try:
        with open(file_path, 'rb') as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()


def decompress_data(data_bits: str) -> str:
    '''Decompress the given bit string with the Lempel-Ziv-Welch algorithm.'''
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index).is_integer():
            # each time the lexicon size crosses a power of two, the code width
            # grows by one bit, so re-key every entry with a leading "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex['0' + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    '''Write the given bit string (only 0s and 1s) as padded bytes to the file.'''
    byte_length = 8
    try:
        with open(file_path, 'wb') as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    '''Strip the size prefix that the matching compressor writes.'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    '''Read the source file, decompress it, and write the result to destination.
    (The original name `compress` is kept; the pipeline actually decompresses.)'''
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
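
# Example invocation (file paths are placeholders): decode `compressed.bin`,
# produced by the matching LZW compressor, into `restored.txt`:
#   python lempel_ziv_decompress.py compressed.bin restored.txt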
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2]) | 106 | 0 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    '''Build an n-qubit QFT circuit, measure it, and return simulator counts.'''
    if isinstance(number_of_qubits, str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")

    qr = QuantumRegister(number_of_qubits, """qr""")
    cr = ClassicalRegister(number_of_qubits, """cr""")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
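
# Sanity note: the circuit above starts from |000>, and the QFT of the all-zero
# state is the uniform superposition, so the 10000-shot histogram printed here
# should be roughly flat over the 2**3 = 8 bitstrings (~1250 counts each).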
| 714 |
import argparse
import os
import re
lowerCamelCase__ : List[Any] = """src/transformers"""
# Pattern that looks at the indentation in a line.
lowerCamelCase__ : Union[str, Any] = re.compile(R"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase__ : Any = re.compile(R"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase__ : int = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase__ : List[Any] = re.compile(R"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase__ : str = re.compile(R"""\[([^\]]+)\]""")
def UpperCamelCase ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ : Optional[Any] = _re_indent.search(lowercase_ )
return "" if search is None else search.groups()[0]
def UpperCamelCase ( lowercase_ , lowercase_="" , lowercase_=None , lowercase_=None ) -> Dict:
'''simple docstring'''
lowercase__ : List[str] = 0
lowercase__ : List[Any] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(lowercase_ ):
index += 1
lowercase__ : List[str] = ["""\n""".join(lines[:index] )]
else:
lowercase__ : Optional[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ : List[Any] = [lines[index]]
index += 1
while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(lowercase_ ) )
if index < len(lowercase_ ) - 1:
lowercase__ : str = [lines[index + 1]]
index += 1
else:
lowercase__ : Union[str, Any] = []
else:
blocks.append("""\n""".join(lowercase_ ) )
lowercase__ : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase_ ) > 0:
blocks.append("""\n""".join(lowercase_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def UpperCamelCase ( lowercase_ ) -> List[Any]:
'''simple docstring'''
def _inner(lowercase_ ):
return key(lowercase_ ).lower().replace("""_""" , """""" )
return _inner
def UpperCamelCase ( lowercase_ , lowercase_=None ) -> Optional[Any]:
'''simple docstring'''
def noop(lowercase_ ):
return x
if key is None:
lowercase__ : Dict = noop
# Constants are all uppercase, they go first.
lowercase__ : Dict = [obj for obj in objects if key(lowercase_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ : Any = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ : Optional[int] = [obj for obj in objects if not key(lowercase_ )[0].isupper()]
lowercase__ : Tuple = ignore_underscore(lowercase_ )
return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ )
def UpperCamelCase ( lowercase_ ) -> Tuple:
'''simple docstring'''
def _replace(lowercase_ ):
lowercase__ : int = match.groups()[0]
if "," not in imports:
return F'[{imports}]'
lowercase__ : Union[str, Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ : int = keys[:-1]
return "[" + ", ".join([F'"{k}"' for k in sort_objects(lowercase_ )] ) + "]"
lowercase__ : Any = import_statement.split("""\n""" )
if len(lowercase_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ : Tuple = 2 if lines[1].strip() == """[""" else 1
lowercase__ : Optional[int] = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase__ : Optional[Any] = sort_objects(lowercase_ , key=lambda lowercase_ : x[1] )
lowercase__ : Union[str, Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase__ : List[str] = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase__ : Optional[int] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase__ : Optional[int] = keys[:-1]
lowercase__ : int = get_indent(lines[1] ) + """, """.join([F'"{k}"' for k in sort_objects(lowercase_ )] )
return "\n".join(lowercase_ )
else:
# Finally we have to deal with imports fitting on one line
lowercase__ : Any = _re_bracket_content.sub(_replace , lowercase_ )
return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports defined in the `_import_structure` of every init under the repo source path."""
    failures = []
    # PATH_TO_TRANSFORMERS is the source-root constant defined near the top of this script (not shown here).
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
lowerCamelCase__ : int = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
lowerCamelCase__ : List[Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
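# Editor's illustration (not in the original script): a tiny, self-contained check of the
# ordering rule implemented by `sort_objects` -- constants first, then classes, then
# functions, with case and underscores ignored inside each bucket. The sample names are
# made up, and the helper is never called during normal execution.
def _demo_sort_objects():
    sample = ["load_tool", "BertModel", "AUTO_MAPPING", "Agent", "cached_file"]
    assert sort_objects(sample) == ["AUTO_MAPPING", "Agent", "BertModel", "cached_file", "load_tool"]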
| 495 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 54 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclid: returns (x, y) such that a*x + b*y = gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Solve n ≡ r_a (mod n_a) and n ≡ r_b (mod n_b) using Bézout coefficients (n_a, n_b coprime)."""
    (x, y) = extended_euclid(n_a, n_b)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return b such that a*b ≡ 1 (mod n)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_a: int, r_a: int, n_b: int, r_b: int) -> int:
    """Same as chinese_remainder_theorem, but built from modular inverses."""
    x, y = invert_modulo(n_a, n_b), invert_modulo(n_b, n_a)
    m = n_a * n_b
    n = r_b * x * n_a + r_a * y * n_b
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
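# Editor's worked example (not part of the original module): x ≡ 1 (mod 5) and x ≡ 3 (mod 7)
# has the unique solution x = 31 in [0, 35). The helper is defined but never called, so
# importing or running this module is unaffected.
def _demo_crt():
    assert chinese_remainder_theorem(5, 1, 7, 3) == 31
    assert chinese_remainder_theorem2(5, 1, 7, 3) == 31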
| 54 | 1 |
"""Count inversions in an array, by brute force and by divide and conquer."""


def count_inversions_bf(arr):
    """Count inversions by brute force: O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions with a merge-sort style divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists and count pairs (x in p, y in q) with x > y."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
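# Editor's cross-check (not part of the original module): both counters must agree on any
# input, so a randomized comparison is a cheap sanity test. Defined but never called.
def _cross_check_inversions():
    import random

    arr = random.sample(range(100), 20)
    assert count_inversions_bf(arr) == count_inversions_recursive(arr)[1]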
| 564 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs=None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
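# Editor's usage sketch (not part of this module; loading "t5-small" needs network access).
# The sentinel bookkeeping above places the extra_ids at the very top of the vocabulary, so
# "<extra_id_0>" always maps to vocab_size - 1:
#
#     from transformers import T5Tokenizer
#     tok = T5Tokenizer.from_pretrained("t5-small")
#     assert tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1
#     ids = tok("translate English to German: Hello", return_tensors="pt").input_ids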
| 564 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
| 327 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
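# Editor's sketch (not part of the scheduler): the loop above discretizes
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i); a minimal standalone check that the
# cosine schedule keeps every beta inside (0, 0.999]. Defined but never called.
def _check_cosine_betas(num_steps: int = 10) -> None:
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    for i in range(num_steps):
        t1, t2 = i / num_steps, (i + 1) / num_steps
        beta = min(1 - alpha_bar(t2) / alpha_bar(t1), 0.999)
        assert 0.0 < beta <= 0.999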
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """A scheduler used in the unCLIP model, modified from DDPM."""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 327 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 551 |
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class TFBenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_eager(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], eager_mode=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_with_configs_graph(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, [config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "patrickvonplaten/t5-tiny-random"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0, "Cannot do xla on CPU.")
    def test_inference_no_configs_xla(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = TensorFlowBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], use_xla=True, multi_process=False
        )
        benchmark = TensorFlowBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = TensorFlowBenchmarkArguments(
                models=[MODEL_ID], inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, eager_mode=True, multi_process=False
            )
            benchmark = TensorFlowBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 551 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token that would take this constraint one step closer to being fulfilled."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Reads in a token and returns the booleans (stepped, completed, reset) describing the new state."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the state of this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint, optionally copying its state."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""`Constraint` enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""A helper class that builds a trie with the words represented in `nested_token_ids`."""
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the current sequence `current_seq`."""
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether # of leaves == # of words. Otherwise some word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    r"""A special `Constraint` that is fulfilled by fulfilling just one of several candidate sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    r"""A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch `self.constraints` objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
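# Editor's usage sketch (not part of the original module): PhrasalConstraint walks one
# forced token sequence, while DisjunctiveTrie exposes, for a partial sequence, exactly
# the continuations that keep some candidate phrase alive. Defined but never called.
def _demo_constraints():
    c = PhrasalConstraint([5, 9, 3])
    done = False
    for token_id in [5, 9, 3]:
        stepped, done, _ = c.update(token_id)
        assert stepped
    assert done and c.remaining() == 0

    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4]])
    assert sorted(trie.next_tokens([1, 2])) == [3, 4]
    assert trie.reached_leaf([1, 2, 3])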
| 442 |
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-process a single flax key/tensor pair to match the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, save_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(save_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key_tuple, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key_tuple)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path_shard = os.path.join(
                save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path_shard)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path_shard = os.path.join(save_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path_shard)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(save_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(save_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(save_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
lowerCAmelCase_ : List[Any] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
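# Editor's illustration (not part of the conversion script): the shard-size accounting in
# `shard_on_the_fly` charges numel * bytes-per-element against `max_shard_size`, so a
# 1000x1000 fp32 tensor contributes exactly 4 MB. Defined but never called.
def _demo_shard_accounting():
    t = torch.zeros(1000, 1000, dtype=torch.float32)
    assert t.numel() * dtype_byte_size(t.dtype) == 4_000_000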
| 442 | 1 |
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowercase_ = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
lowercase_ = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
lowercase_ = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            # Bootstrap aggregation yields low/mid/high confidence intervals per rouge type.
            result = aggregator.aggregate()
        else:
            # Without aggregation, return the per-example scores grouped by rouge type.
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
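# Minimal usage sketch (assumes the `datasets`, `nltk`, and `rouge_score`
# packages are installed and this metric is registered under the name "rouge"):
#
#   import datasets
#   rouge = datasets.load_metric("rouge")
#   results = rouge.compute(
#       predictions=["hello there", "general kenobi"],
#       references=["hello there", "general kenobi"],
#   )
#   print(results["rouge1"].mid.fmeasure)  # 1.0 for identical strings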
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    """A pile of cards, ordered by the value currently on top (the last element)."""

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort ``collection`` in place using patience sort and return it.

    >>> patience_sort([1, 9, 5, 21, 17, 6])
    [1, 5, 6, 9, 17, 21]
    """
    stacks: list[Stack] = []
    # sort into stacks: each element goes on the leftmost stack whose top is
    # >= the element, or starts a new stack if no such stack exists
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)

    # use a heap-based merge to merge the stacks efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
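# Worked example of the stack phase for [5, 1, 4, 2, 3]:
#   5 -> no stack yet, new stack ................... stacks: [[5]]
#   1 -> first stack top 5 >= 1, append there ...... stacks: [[5, 1]]
#   4 -> no top >= 4, new stack .................... stacks: [[5, 1], [4]]
#   2 -> tops are 1 and 4; second top 4 >= 2 ....... stacks: [[5, 1], [4, 2]]
#   3 -> tops are 1 and 2, both < 3, new stack ..... stacks: [[5, 1], [4, 2], [3]]
# Each stack is descending, so reversing makes it ascending, and the heap-based
# merge of [1, 5], [2, 4], [3] yields [1, 2, 3, 4, 5].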
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
lowercase_ = [int(item) for item in user_input.split(",")]
print(patience_sort(unsorted))