import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs) -> None:
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self) -> int:
        return len(self.vocab)

    def get_vocab(self) -> dict:
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # MGP-STR tokenizes text into individual characters.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
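

# Hedged usage sketch (added for illustration; not part of the original module).
# The tiny vocabulary below is hypothetical -- the released checkpoint ships its
# own vocab.json -- but it is enough to exercise the character tokenizer above.
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
        json.dump({'[GO]': 0, '[s]': 1, 'a': 2, 'b': 3}, tmp)
    tokenizer = MgpstrTokenizer(vocab_file=tmp.name)
    print(tokenizer._tokenize('ab'))            # ['a', 'b']
    print(tokenizer._convert_token_to_id('a'))  # 2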


# ---------------------------------------------------------------------------

from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent) -> None:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': TFFlaubertModel,
            'fill-mask': TFFlaubertWithLMHeadModel,
            'question-answering': TFFlaubertForQuestionAnsweringSimple,
            'text-classification': TFFlaubertForSequenceClassification,
            'token-classification': TFFlaubertForTokenClassification,
            'zero-shot': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))


# ---------------------------------------------------------------------------

def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging the given list of file sizes."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
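    # Worked example (added for illustration; not in the original script):
    # files of sizes [2, 3, 4] are merged as 2 + 3 = 5, then 5 + 4 = 9,
    # so the optimal total cost is 5 + 9 = 14.
    print(optimal_merge_pattern([2, 3, 4]))  # 14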


# ---------------------------------------------------------------------------

from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()


# ---------------------------------------------------------------------------

import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']
    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
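
# Example invocation (added for illustration; every path and checkpoint name
# below is hypothetical):
#
#   python convert_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./downstream.ckpt \
#       --model_dump_path ./converted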


# ---------------------------------------------------------------------------

import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float):
    """Find a root of `function` by the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
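    # Added for illustration: the same secant update,
    #   x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)),
    # also finds the root of cos(x) - x, which lies near 0.7390851.
    print(intersection(lambda x: math.cos(x) - x, 0.0, 1.0))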


# ---------------------------------------------------------------------------

import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f'packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.')
        Path(save_path / f'{split}.source').open('w').write('\n'.join(packed_src))
        Path(save_path / f'{split}.target').open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f'{split}.source', data_dir / f'{split}.target'
        shutil.copyfile(src_path, save_path / f'{split}.source')
        shutil.copyfile(tgt_path, save_path / f'{split}.target')
def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
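
# Example invocation (added for illustration; the data layout is hypothetical,
# expecting {train,val,test}.{source,target} files under --data_dir):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 128 --data_dir ./cnn_dm --save_path ./cnn_dm_packed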


# ---------------------------------------------------------------------------

import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self) -> None:
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None) -> None:
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
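

# Hedged usage sketch (added for illustration; the actor count, model name, and
# running without a custom indexed dataset are all assumptions):
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-nq', workers)
#   retriever.init_retrieval()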


# ---------------------------------------------------------------------------

from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints applied during constrained generation."""

    def __init__(self) -> None:
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception('update() does not fulfill the constraint.')
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def does_advance(self, token_id):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def update(self, token_id):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class PhrasalConstraint(Constraint):
    """Constraint enforcing that an exact sequence of token ids appears in the output."""

    def __init__(self, token_ids) -> None:
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.')
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.')
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token-id sequences."""

    def __init__(self, nested_token_ids, no_subsets=True) -> None:
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
                f' {nested_token_ids}.')
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint that is fulfilled by generating any one of several candidate phrases."""

    def __init__(self, nested_token_ids) -> None:
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.')
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.')
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.')
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}')
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints during generation."""

    def __init__(self, constraints) -> None:
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.')
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                # inprogress to None. If there are no pending constraints either, then this full list of constraints
                # is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.')
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
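

# Hedged usage sketch (added for illustration): these classes back constrained
# beam search in `model.generate`. The model choice and forced phrase are
# arbitrary examples, not part of the original module.
#
#   from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('t5-base')
#   model = AutoModelForSeq2SeqLM.from_pretrained('t5-base')
#   constraint = PhrasalConstraint(tokenizer('Sie', add_special_tokens=False).input_ids)
#   inputs = tokenizer('translate English to German: How old are you?', return_tensors='pt')
#   outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)
#   print(tokenizer.decode(outputs[0], skip_special_tokens=True))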


# ---------------------------------------------------------------------------

import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384', device_map='auto')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertTrue(model.config.id2label[predicted_class_idx], 'tabby, tabby cat')
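

# Hedged usage sketch (added for illustration): the checkpoint exercised by the
# integration test can also be queried through the high-level pipeline API.
#
#   from transformers import pipeline
#   classifier = pipeline('image-classification', model='google/vit-hybrid-base-bit-384')
#   print(classifier('./tests/fixtures/tests_samples/COCO/000000039769.png')[0])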


# ---------------------------------------------------------------------------

from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
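

# Hedged usage sketch (added for illustration): instantiating the config and
# inspecting the dynamic ONNX input axes defined above.
#
#   config = BigBirdConfig(attention_type='block_sparse', block_size=64)
#   onnx_config = BigBirdOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes


# ---------------------------------------------------------------------------
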
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
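
# Example invocation (added for illustration; the script and folder names are
# arbitrary):
#
#   python convert_levit_checkpoint.py --model_name levit-256 \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub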


# ---------------------------------------------------------------------------

import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir('fixtures')
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')
        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor')
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))
        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token)
            new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'})
        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')


# ---------------------------------------------------------------------------

import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors='np')
        input_processor = processor(audios=raw_speech, return_tensors='np')
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg='`processor` and `feature_extractor` model input names do not match',
        )
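# For reference, a combined processor call routes `text=` to the tokenizer and
# `audios=` to the feature extractor. A minimal sketch (checkpoint download
# required; the `batch` field names come from the two underlying components):
#
#     processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
#     batch = processor(text='a sound of a cat', audios=floats_list((3, 1000)), return_tensors='np')
#     sorted(batch.keys())   # tokenizer fields plus the feature extractor's input features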
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, pt_model, fx_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()
            fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)
        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)
        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)
        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
        self.check_pt_flax_equivalence(pt_model, fx_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)


@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
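# Shape sketch for the integration check above: with 1 image and 2 candidate
# captions, logits_per_image is (1, 2) and logits_per_text is its transpose,
# (2, 1); the expected_logits row holds the image's similarity to each caption.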
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool


if is_datasets_available():
    from datasets import load_dataset


class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
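# Minimal usage sketch (an assumption-laden example: it needs network access and
# downloads the SpeechT5, HiFi-GAN and speaker-embedding artifacts on first call):
#
#     tool = TextToSpeechTool()
#     audio = tool("Hello, how are you?")   # 1-D waveform tensor at 16 kHz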
def move_tower(height, from_pole, to_pole, with_pole):
    # Recursively move `height` disks from `from_pole` to `to_pole`,
    # using `with_pole` as the spare peg.
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
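# Worked example: move_tower(2, "A", "B", "C") prints
#     moving disk from A to C
#     moving disk from A to B
#     moving disk from C to B
# i.e. 2**2 - 1 = 3 moves, the optimum for two disks.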
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).')
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    # Lower text and remove punctuation, articles and extra whitespace.
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ])


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score")
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score")
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)")
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
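# Worked example for compute_f1: with gold "the cat sat" and prediction "the cat",
# normalize_answer strips the article, so the gold tokens are ["cat", "sat"] and
# the prediction tokens are ["cat"]. Then precision = 1/1, recall = 1/2 and
# F1 = 2 * 1.0 * 0.5 / (1.0 + 0.5) = 2/3.
#
#     assert abs(compute_f1("the cat sat", "the cat") - 2 / 3) < 1e-9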
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        # `set_counts[i]` is the number of items in set i; every set starts
        # as its own parent with rank 1.
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        # Union by rank; returns False if both elements are already in the same set.
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        # Find with path compression.
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
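# Example usage: a minimal sketch exercising the class above. Three singleton
# sets of size 1; merging 0 and 1 grows the largest set to 2.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    assert ds.merge(0, 1)        # joins {0} and {1}
    assert not ds.merge(0, 1)    # already in the same set
    assert ds.max_set == 2
    assert ds.get_parent(0) == ds.get_parent(1)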
from math import sqrt


def sum_of_divisors(n):
    # Sum of the proper divisors of n (all divisors except n itself).
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit=10_000):
    # Sum of all amicable numbers below `limit` (Project Euler problem 21).
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
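# Worked example: 220 and 284 form the classic amicable pair, since
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so
# solution(300) returns 220 + 284 == 504.
#
#     assert sum_of_divisors(220) == 284
#     assert sum_of_divisors(284) == 220
#     assert solution(300) == 504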
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
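# For reference, the helpers under test are used like this (a sketch; network
# access required, mirroring the parametrized expectations above):
#
#     from datasets import get_dataset_split_names
#     get_dataset_split_names("squad", config_name="plain_text")   # -> ["train", "validation"]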
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
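# What the lazy structure above buys: importing the package stays cheap, and
# the heavy submodules load only on first attribute access. A sketch:
#
#     import transformers.models.speech_to_text as s2t   # fast, nothing heavy imported yet
#     cfg = s2t.Speech2TextConfig()                      # triggers the real submodule import here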
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4096,
    "allenai/longformer-large-4096": 4096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    # Returns the mapping from utf-8 bytes to printable unicode characters used
    # by the byte-level BPE, avoiding whitespace/control characters.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
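# Minimal usage sketch (assumes the pretrained vocab/merges files are reachable;
# Longformer shares RoBERTa's byte-level BPE, so "Hello world" maps to the
# familiar RoBERTa ids):
#
#     tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#     tok("Hello world")["input_ids"]   # -> [0, 31414, 232, 2]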
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
        if node is None:
            if self.root is None:
                return None
            lowerCAmelCase_ : Dict = self.root
        if not self.empty():
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = []
if curr_node is not None:
lowerCAmelCase_ : Dict = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(snake_case__)
# Prints all the elements of the list in order traversal
print(snake_case__)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(snake_case__)
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
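# A minimal usage sketch for the k-th smallest helper above (hypothetical call,
# using the original, untransformed method names of this BST):
#
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10)
#   t.find_kth_smallest(2, t.root)  # -> 3, read off the inorder traversal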
| 683 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'ibert'
def __init__( self : str ,lowerCAmelCase__ : Optional[int]=3_05_22 ,lowerCAmelCase__ : Optional[Any]=7_68 ,lowerCAmelCase__ : Any=12 ,lowerCAmelCase__ : List[Any]=12 ,lowerCAmelCase__ : Union[str, Any]=30_72 ,lowerCAmelCase__ : Tuple="gelu" ,lowerCAmelCase__ : List[Any]=0.1 ,lowerCAmelCase__ : int=0.1 ,lowerCAmelCase__ : Optional[Any]=5_12 ,lowerCAmelCase__ : List[str]=2 ,lowerCAmelCase__ : Any=0.02 ,lowerCAmelCase__ : Optional[Any]=1e-1_2 ,lowerCAmelCase__ : List[str]=1 ,lowerCAmelCase__ : Dict=0 ,lowerCAmelCase__ : Dict=2 ,lowerCAmelCase__ : Union[str, Any]="absolute" ,lowerCAmelCase__ : Any=False ,lowerCAmelCase__ : Tuple="none" ,**lowerCAmelCase__ : Tuple ,) -> str:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ ,bos_token_id=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Union[str, Any] = intermediate_size
lowerCAmelCase_ : Optional[int] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = type_vocab_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : List[str] = position_embedding_type
lowerCAmelCase_ : List[Any] = quant_mode
lowerCAmelCase_ : Optional[Any] = force_dequant
class __snake_case ( snake_case__ ):
"""simple docstring"""
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase_ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCAmelCase_ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 683 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
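        # Walk both strings in lockstep and split at the first mismatch; returns
        # (common_prefix, remaining_prefix_of_node, remaining_word), e.g. the
        # node prefix "banana" matched against "band" gives ("ban", "ana", "d").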
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
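        # Case 1: The word is exactly this node's prefix
        # Solution: We mark the current node as a leaf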
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
            # Case 4: The word is longer than the matching prefix
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = "banana bananas bandana band apple all beast".split()
lowerCAmelCase_ : List[Any] = RadixNode()
root.insert_many(snake_case__)
assert all(root.find(snake_case__) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = RadixNode()
lowerCAmelCase_ : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__)
print("Words:" , snake_case__)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683 | 1 |
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'EncodecFeatureExtractor'
UpperCamelCase_ = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.feature_extractor
lowerCAmelCase_ : List[str] = False
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Dict=None ,lowerCAmelCase__ : Optional[Any]=None ,lowerCAmelCase__ : Union[str, Any]=True ) -> Dict:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase__ ,language=lowerCAmelCase__ ,no_timestamps=lowerCAmelCase__ )
def __call__( self : int ,*lowerCAmelCase__ : int ,**lowerCAmelCase__ : List[str] ) -> str:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("audio" ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = kwargs.pop("sampling_rate" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = kwargs.pop("text" ,lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowerCAmelCase_ : Dict = args[0]
lowerCAmelCase_ : str = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if text is not None:
lowerCAmelCase_ : Union[str, Any] = self.tokenizer(lowerCAmelCase__ ,**lowerCAmelCase__ )
if audio is not None:
lowerCAmelCase_ : int = self.feature_extractor(lowerCAmelCase__ ,*lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,**lowerCAmelCase__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
lowerCAmelCase_ : Union[str, Any] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
lowerCAmelCase_ : List[Any] = audio_inputs["padding_mask"]
return inputs
def UpperCAmelCase_ ( self : Dict ,*lowerCAmelCase__ : Optional[Any] ,**lowerCAmelCase__ : Dict ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = kwargs.pop("audio" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = kwargs.pop("padding_mask" ,lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowerCAmelCase_ : int = args[0]
lowerCAmelCase_ : Union[str, Any] = args[1:]
if audio_values is not None:
return self._decode_audio(lowerCAmelCase__ ,padding_mask=lowerCAmelCase__ )
else:
return self.tokenizer.batch_decode(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,*lowerCAmelCase__ : Union[str, Any] ,**lowerCAmelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional = None ) -> List[np.ndarray]:
'''simple docstring'''
lowerCAmelCase_ : int = to_numpy(lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = audio_values.shape
if padding_mask is None:
return list(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = to_numpy(lowerCAmelCase__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
lowerCAmelCase_ : int = seq_len - padding_mask.shape[-1]
lowerCAmelCase_ : str = 1 - self.feature_extractor.padding_value
lowerCAmelCase_ : str = np.pad(lowerCAmelCase__ ,((0, 0), (0, difference)) ,"constant" ,constant_values=lowerCAmelCase__ )
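        # e.g. (hypothetical shapes) a generated sequence of length 1000 with a
        # 950-step padding mask: the mask is right-padded with 50 non-padding
        # entries so the extra generated steps are kept when slicing below.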
lowerCAmelCase_ : Tuple = audio_values.tolist()
for i in range(lowerCAmelCase__ ):
lowerCAmelCase_ : Dict = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
lowerCAmelCase_ : Dict = sliced_audio.reshape(lowerCAmelCase__ ,-1 )
return audio_values
| 683 |
from __future__ import annotations
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
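# A minimal usage sketch (hypothetical numbers, using the original, untransformed
# function name): the mass-action law n * p = n_i**2 fills in the missing value:
#   carrier_concentration(electron_conc=0, hole_conc=200, intrinsic_conc=1200)
#   -> ("electron_conc", 7200.0)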
| 683 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowercase = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 1 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['pixel_values']
def __init__( self : Optional[int] ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : int = 32 ,lowerCAmelCase__ : Tuple=PILImageResampling.BILINEAR ,lowerCAmelCase__ : bool = True ,**lowerCAmelCase__ : Union[str, Any] ,) -> None:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = do_resize
lowerCAmelCase_ : Dict = do_rescale
lowerCAmelCase_ : Tuple = size_divisor
lowerCAmelCase_ : str = resample
super().__init__(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : np.ndarray ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[ChannelDimension] = None ,**lowerCAmelCase__ : Optional[int] ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = get_image_size(lowerCAmelCase__ )
# Rounds the height and width down to the closest multiple of size_divisor
lowerCAmelCase_ : Dict = height // size_divisor * size_divisor
lowerCAmelCase_ : List[str] = width // size_divisor * size_divisor
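        # e.g. with size_divisor=32, a 543x327 input becomes 512x320: both sides
        # are floored to the nearest multiple of 32 before the actual resize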
lowerCAmelCase_ : str = resize(lowerCAmelCase__ ,(new_h, new_w) ,resample=lowerCAmelCase__ ,data_format=lowerCAmelCase__ ,**lowerCAmelCase__ )
return image
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : np.ndarray ,lowerCAmelCase__ : float ,lowerCAmelCase__ : Optional[ChannelDimension] = None ,**lowerCAmelCase__ : Union[str, Any] ) -> np.ndarray:
'''simple docstring'''
return rescale(image=lowerCAmelCase__ ,scale=lowerCAmelCase__ ,data_format=lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Union[str, Any]=None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[Union[TensorType, str]] = None ,lowerCAmelCase__ : ChannelDimension = ChannelDimension.FIRST ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : str = size_divisor if size_divisor is not None else self.size_divisor
lowerCAmelCase_ : Optional[int] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
lowerCAmelCase_ : str = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
lowerCAmelCase_ : str = [to_numpy_array(lowerCAmelCase__ ) for img in images]
if do_resize:
lowerCAmelCase_ : int = [self.resize(lowerCAmelCase__ ,size_divisor=lowerCAmelCase__ ,resample=lowerCAmelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase_ : Optional[int] = [self.rescale(lowerCAmelCase__ ,scale=1 / 2_55 ) for image in images]
lowerCAmelCase_ : List[str] = [to_channel_dimension_format(lowerCAmelCase__ ,lowerCAmelCase__ ) for image in images]
lowerCAmelCase_ : Any = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase__ ,tensor_type=lowerCAmelCase__ )
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
    lowerCAmelCase_ : List[str] = HfArgumentParser(snake_case__)
    try:
        lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
        lowerCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=snake_case__)
except ValueError as e:
lowerCAmelCase_ : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowerCAmelCase_ : Tuple = " ".join(str(snake_case__).split(" ")[:-1])
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : Optional[Any] = eval(str(snake_case__).split(" ")[-1])
lowerCAmelCase_ : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(snake_case__)
if len(snake_case__) > 0:
lowerCAmelCase_ : Optional[Any] = full_error_msg + begin_error_msg + str(snake_case__)
raise ValueError(snake_case__)
benchmark.run()
if __name__ == "__main__":
main()
| 683 | 1 |
import numpy as np
from PIL import Image
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Any = np.array(snake_case__)
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix")
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = 0
# compute the shape of the output matrix
lowerCAmelCase_ : Optional[int] = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
lowerCAmelCase_ : Optional[Any] = np.zeros((maxpool_shape, maxpool_shape))
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
lowerCAmelCase_ : str = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : List[str] = 0
return updated_arr
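# Worked example: a 4x4 input with size=2 and stride=2 yields a
# ((4 - 2) // 2 + 1) = 2 by 2 output, each entry the max of one 2x2 block.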
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = np.array(snake_case__)
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix")
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : str = 0
# compute the shape of the output matrix
lowerCAmelCase_ : Tuple = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
lowerCAmelCase_ : Dict = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
lowerCAmelCase_ : Optional[Any] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
lowerCAmelCase_ : Union[str, Any] = 0
lowerCAmelCase_ : List[Any] = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
_lowercase = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 683 |
_lowercase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCamelCase ( snake_case__):
assert type(snake_case__) in (int, float) and decimal == int(snake_case__)
lowerCAmelCase_ : Optional[Any] = int(snake_case__)
lowerCAmelCase_ : Tuple = ""
lowerCAmelCase_ : str = False
if decimal < 0:
lowerCAmelCase_ : Tuple = True
decimal *= -1
while decimal > 0:
lowerCAmelCase_ , lowerCAmelCase_ : Any = divmod(snake_case__ , 16)
lowerCAmelCase_ : Dict = values[remainder] + hexadecimal
lowerCAmelCase_ : List[str] = "0x" + hexadecimal
if negative:
lowerCAmelCase_ : Optional[Any] = "-" + hexadecimal
return hexadecimal
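# Worked examples: 26 -> "0x1a" (26 = 1 * 16 + 10) and -256 -> "-0x100".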
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
from math import sqrt
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[int] = 0
for i in range(1 , int(sqrt(snake_case__) + 1)):
if n % i == 0 and i != sqrt(snake_case__):
total += i + n // i
elif i == sqrt(snake_case__):
total += i
return total - n
def UpperCamelCase ( snake_case__ = 1_00_00):
lowerCAmelCase_ : int = sum(
i
for i in range(1 , snake_case__)
if sum_of_divisors(sum_of_divisors(snake_case__)) == i and sum_of_divisors(snake_case__) != i)
return total
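# Sanity check (known amicable pair, using the original, untransformed names):
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220, so both are
# counted; the total for all such numbers below 10000 is 31626.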
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
elif isinstance(snake_case__ , snake_case__):
inputs.append(create_inputs(snake_case__))
else:
raise ValueError(F'''Invalid type requested: {input_type}''')
return inputs
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[Any] = []
for output in outputs:
if isinstance(snake_case__ , (str, AgentText)):
output_types.append("text")
elif isinstance(snake_case__ , (Image.Image, AgentImage)):
output_types.append("image")
elif isinstance(snake_case__ , (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(F'''Invalid output: {output}''')
return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 | 1 |
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Any = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
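# The test above inverts P(n) = n * (3 * n - 1) / 2: solving 3n^2 - n - 2P = 0
# gives n = (1 + sqrt(1 + 24 * P)) / 6, so P is pentagonal iff n is a whole number.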
def UpperCamelCase ( snake_case__ = 50_00):
lowerCAmelCase_ : List[str] = [(i * (3 * i - 1)) // 2 for i in range(1 , snake_case__)]
for i, pentagonal_i in enumerate(snake_case__):
for j in range(snake_case__ , len(snake_case__)):
lowerCAmelCase_ : int = pentagonal_nums[j]
lowerCAmelCase_ : str = pentagonal_i + pentagonal_j
lowerCAmelCase_ : str = pentagonal_j - pentagonal_i
if is_pentagonal(snake_case__) and is_pentagonal(snake_case__):
return b
return -1
if __name__ == "__main__":
print(f"{solution() = }")
| 683 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = dataset_loading_script_name
lowerCAmelCase_ : List[str] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=snake_case__)
lowerCAmelCase_ : List[Any] = script_dir / F'''{script_name}.py'''
with open(snake_case__ , "w") as f:
f.write(snake_case__)
return str(snake_case__)
| 683 | 1 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = LxmertTokenizer
UpperCamelCase_ = LxmertTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "UNwant\u00E9d,running"
lowerCAmelCase_ : str = "unwanted, running"
return input_text, output_text
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(lowerCAmelCase__ ,["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,[7, 4, 5, 10, 8, 9] )
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
lowerCAmelCase_ : str = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
| 683 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase_ : int = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 1 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = TextToVideoSDPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCamelCase_ = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
lowerCAmelCase_ : Tuple = DDIMScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,clip_sample=lowerCAmelCase__ ,set_alpha_to_one=lowerCAmelCase__ ,)
torch.manual_seed(0 )
lowerCAmelCase_ : int = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=1_28 ,)
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,hidden_act="gelu" ,projection_dim=5_12 ,)
lowerCAmelCase_ : Dict = CLIPTextModel(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ : int = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : List[str]=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[str] = self.get_dummy_components()
lowerCAmelCase_ : int = TextToVideoSDPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : Any = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = "np"
lowerCAmelCase_ : Dict = sd_pipe(**lowerCAmelCase__ ).frames
lowerCAmelCase_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowerCAmelCase_ : Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase__ ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ ,expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
lowerCAmelCase_ : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCAmelCase_ : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase_ : Optional[Any] = pipe.to("cuda" )
lowerCAmelCase_ : List[str] = "Spiderman is surfing"
lowerCAmelCase_ : str = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase_ : Any = pipe(lowerCAmelCase__ ,generator=lowerCAmelCase__ ,num_inference_steps=25 ,output_type="pt" ).frames
lowerCAmelCase_ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
lowerCAmelCase_ : Union[str, Any] = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowerCAmelCase_ : str = pipe.to("cuda" )
lowerCAmelCase_ : str = "Spiderman is surfing"
lowerCAmelCase_ : Union[str, Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCAmelCase_ : List[str] = pipe(lowerCAmelCase__ ,generator=lowerCAmelCase__ ,num_inference_steps=2 ,output_type="pt" ).frames
lowerCAmelCase_ : Optional[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 683 |
from __future__ import annotations
from random import random
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : int | None = None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Any = random()
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = str(self.value ) + " "
lowerCAmelCase_ : List[Any] = str(self.left or "" )
lowerCAmelCase_ : Union[str, Any] = str(self.right or "" )
return value + left + right
def UpperCamelCase ( snake_case__ , snake_case__):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowerCAmelCase_ , lowerCAmelCase_ : Any = split(root.left , snake_case__)
return left, root
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = split(root.right , snake_case__)
return root, right
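# split(root, value) keeps every node with value <= `value` in the left result
# and the rest in the right, in expected O(log n) time.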
def UpperCamelCase ( snake_case__ , snake_case__):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowerCAmelCase_ : Dict = merge(left.right , snake_case__)
return left
else:
lowerCAmelCase_ : List[str] = merge(snake_case__ , right.left)
return right
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = Node(snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = split(snake_case__ , snake_case__)
return merge(merge(snake_case__ , snake_case__) , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = split(snake_case__ , value - 1)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = split(snake_case__ , snake_case__)
return merge(snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=",")
inorder(root.right)
def UpperCamelCase ( snake_case__ , snake_case__):
for arg in args.split():
if arg[0] == "+":
lowerCAmelCase_ : List[str] = insert(snake_case__ , int(arg[1:]))
elif arg[0] == "-":
lowerCAmelCase_ : Optional[int] = erase(snake_case__ , int(arg[1:]))
else:
print("Unknown command")
return root
def UpperCamelCase ( ):
lowerCAmelCase_ : str = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. ")
lowerCAmelCase_ : str = input()
while args != "q":
lowerCAmelCase_ : int = interact_treap(snake_case__ , snake_case__)
print(snake_case__)
lowerCAmelCase_ : str = input()
print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
from collections import defaultdict
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = first_str.lower().strip()
lowerCAmelCase_ : Optional[Any] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase_ : Union[str, Any] = first_str.replace(" " , "")
lowerCAmelCase_ : Union[str, Any] = second_str.replace(" " , "")
# Strings of different lengths are not anagrams
if len(snake_case__) != len(snake_case__):
return False
# Default values for count should be 0
lowerCAmelCase_ : defaultdict[str, int] = defaultdict(snake_case__)
# For each position, increment the count for the character from the
# first string and decrement it for the character from the second string
for i in range(len(snake_case__)):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values())
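# Equivalent check (our sketch, not part of the original): collections.Counter
# does the per-character bookkeeping directly, so two cleaned strings are
# anagrams exactly when their Counters compare equal.
def check_anagrams_counter(first: str, second: str) -> bool:
    from collections import Counter
    return Counter(first.lower().replace(" ", "")) == Counter(second.lower().replace(" ", ""))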
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = input('''Enter the first string ''').strip()
_lowercase = input('''Enter the second string ''').strip()
_lowercase = check_anagrams(input_a, input_b)
print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
_lowercase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 2
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : int="<sep>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : List[str]="<cls>" ,lowerCAmelCase__ : Optional[int]="<mask>" ,lowerCAmelCase__ : Union[str, Any]="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]="##" ,**lowerCAmelCase__ : int ,) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,clean_text=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,wordpieces_prefix=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[int] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : int = do_lower_case
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : str = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
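# Illustrative sketch (ours): unlike BERT, Funnel gives the [CLS] position its
# own token type id (the class attribute set to 2 above, i.e.
# cls_token_type_id), so the type ids for a single n-token sequence are
# [2, 0, ..., 0].
def _funnel_token_type_ids(n_tokens_a: int, n_tokens_b: int = 0) -> List[int]:
    ids = [2] + [0] * (n_tokens_a + 1)  # +1 accounts for the trailing [SEP]
    if n_tokens_b:
        ids += [1] * (n_tokens_b + 1)
    return ids

# _funnel_token_type_ids(3) == [2, 0, 0, 0, 0]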
| 683 | 1 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = []
if curr_node is not None:
lowerCAmelCase_ : Dict = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(snake_case__)
# Prints all the elements of the list in order traversal
print(snake_case__)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(snake_case__)
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__)
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase_ : int = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
# If no tests are collected, pytest exits with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase_ : List[Any] = 0
# Doctest custom flag to ignore output.
_lowercase = doctest.register_optionflag('''IGNORE_RESULT''')
_lowercase = doctest.OutputChecker
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase = CustomOutputChecker
_lowercase = HfDoctestModule
_lowercase = HfDocTestParser
| 683 | 1 |
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = ""
for i in table:
res += inp[i - 1]
return res
def UpperCamelCase ( snake_case__):
return data[1:] + data[0]
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = ""
for i in range(len(snake_case__)):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : int = int("0b" + data[0] + data[-1] , 2)
lowerCAmelCase_ : Dict = int("0b" + data[1:3] , 2)
return bin(s[row][col])[2:]
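# Worked example (ours) of the permutation primitive above: apply_table reads
# input bits at the 1-based positions listed in the table, so the P4 table
# [2, 4, 3, 1] reorders "1010" into "0011".
def _permute(bits, table):
    return "".join(bits[i - 1] for i in table)

assert _permute("1010", [2, 4, 3, 1]) == "0011"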
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = message[:4]
lowerCAmelCase_ : Tuple = message[4:]
lowerCAmelCase_ : List[str] = apply_table(snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = xor(snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = apply_sbox(snake_case__ , temp[:4]) # noqa: E741
lowerCAmelCase_ : Tuple = apply_sbox(snake_case__ , temp[4:])
lowerCAmelCase_ : Union[str, Any] = "0" * (2 - len(snake_case__)) + l # noqa: E741
lowerCAmelCase_ : Dict = "0" * (2 - len(snake_case__)) + r
lowerCAmelCase_ : Dict = apply_table(l + r , snake_case__)
lowerCAmelCase_ : List[str] = xor(snake_case__ , snake_case__)
return temp + right
if __name__ == "__main__":
_lowercase = input('''Enter 10 bit key: ''')
_lowercase = input('''Enter 8 bit message: ''')
_lowercase = [6, 3, 7, 4, 8, 5, 10, 9]
_lowercase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_lowercase = [2, 4, 3, 1]
_lowercase = [2, 6, 3, 1, 4, 8, 5, 7]
_lowercase = [4, 1, 3, 5, 7, 2, 8, 6]
_lowercase = [4, 1, 2, 3, 2, 3, 4, 1]
_lowercase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_lowercase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_lowercase = apply_table(key, paa_table)
_lowercase = temp[:5]
_lowercase = temp[5:]
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = left_shift(left)
_lowercase = left_shift(right)
_lowercase = apply_table(left + right, pa_table)
# encryption
_lowercase = apply_table(message, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_lowercase = apply_table(CT, IP)
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = temp[4:] + temp[:4]
_lowercase = function(expansion, sa, sa, keya, temp)
_lowercase = apply_table(temp, IP_inv)
print('''Plain text after decrypting is:''', PT)
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
int(snake_case__)  # minterms must be ints for the % 2 and //= 2 conversion below
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = ['''model.decoder.embed_positions.weights''']
def UpperCamelCase ( snake_case__):
if "emb" in name:
lowerCAmelCase_ : int = name.replace("emb" , "model.decoder.embed_tokens")
if "transformer" in name:
lowerCAmelCase_ : Tuple = name.replace("transformer" , "model.decoder")
if "cross_attention" in name:
lowerCAmelCase_ : List[Any] = name.replace("cross_attention" , "encoder_attn")
if "linear1" in name:
lowerCAmelCase_ : int = name.replace("linear1" , "fc1")
if "linear2" in name:
lowerCAmelCase_ : str = name.replace("linear2" , "fc2")
if "norm1" in name:
lowerCAmelCase_ : Optional[Any] = name.replace("norm1" , "self_attn_layer_norm")
if "norm_cross" in name:
lowerCAmelCase_ : Dict = name.replace("norm_cross" , "encoder_attn_layer_norm")
if "norm2" in name:
lowerCAmelCase_ : Optional[int] = name.replace("norm2" , "final_layer_norm")
if "out_norm" in name:
lowerCAmelCase_ : int = name.replace("out_norm" , "model.decoder.layer_norm")
if "linears" in name:
lowerCAmelCase_ : str = name.replace("linears" , "lm_heads")
if "condition_provider.conditioners.description.output_proj" in name:
lowerCAmelCase_ : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj")
return name
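# Quick trace (ours) of how the substring renames above compose: one fairseq
# key can pass through several rules in sequence.
def _demo_rename(name):
    for old, new in [("transformer", "model.decoder"), ("linear1", "fc1"), ("linear2", "fc2")]:
        name = name.replace(old, new)
    return name

# _demo_rename("transformer.layers.0.linear1.weight")
#   == "model.decoder.layers.0.fc1.weight"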
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = list(state_dict.keys())
lowerCAmelCase_ : Dict = {}
for key in keys:
lowerCAmelCase_ : Any = state_dict.pop(snake_case__)
lowerCAmelCase_ : Optional[int] = rename_keys(snake_case__)
if "in_proj_weight" in key:
# split fused qkv proj
lowerCAmelCase_ : List[Any] = val[:hidden_size, :]
lowerCAmelCase_ : List[str] = val[hidden_size : 2 * hidden_size, :]
lowerCAmelCase_ : int = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCAmelCase_ : Union[str, Any] = val
else:
lowerCAmelCase_ : Optional[int] = val
return state_dict, enc_dec_proj_state_dict
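# Sketch (ours) of the fused-projection split done above: fairseq stacks the
# query/key/value projections into one "in_proj_weight" of shape
# (3 * hidden_size, hidden_size), which is sliced into three square matrices.
def _split_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    q_proj = in_proj_weight[:hidden_size, :]
    k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_proj = in_proj_weight[-hidden_size:, :]
    return q_proj, k_proj, v_proj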
def UpperCamelCase ( snake_case__):
if checkpoint == "small":
# default config values
lowerCAmelCase_ : int = 10_24
lowerCAmelCase_ : Optional[int] = 24
lowerCAmelCase_ : Optional[int] = 16
elif checkpoint == "medium":
lowerCAmelCase_ : Dict = 15_36
lowerCAmelCase_ : Optional[Any] = 48
lowerCAmelCase_ : Optional[Any] = 24
elif checkpoint == "large":
lowerCAmelCase_ : Optional[Any] = 20_48
lowerCAmelCase_ : Optional[Any] = 48
lowerCAmelCase_ : int = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''')
lowerCAmelCase_ : Dict = MusicgenDecoderConfig(
hidden_size=snake_case__ , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case__ , num_attention_heads=snake_case__ , )
return config
@torch.no_grad()
def UpperCamelCase ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__="cpu"):
lowerCAmelCase_ : List[Any] = MusicGen.get_pretrained(snake_case__ , device=snake_case__)
lowerCAmelCase_ : Dict = decoder_config_from_checkpoint(snake_case__)
lowerCAmelCase_ : Dict = fairseq_model.lm.state_dict()
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = rename_state_dict(
snake_case__ , hidden_size=decoder_config.hidden_size)
lowerCAmelCase_ : List[Any] = TaEncoderModel.from_pretrained("t5-base")
lowerCAmelCase_ : Tuple = EncodecModel.from_pretrained("facebook/encodec_32khz")
lowerCAmelCase_ : int = MusicgenForCausalLM(snake_case__).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = decoder.load_state_dict(snake_case__ , strict=snake_case__)
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(snake_case__)
if len(snake_case__) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''')
if len(snake_case__) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''')
# init the composite model
lowerCAmelCase_ : List[str] = MusicgenForConditionalGeneration(text_encoder=snake_case__ , audio_encoder=snake_case__ , decoder=snake_case__)
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(snake_case__)
# check we can do a forward pass
lowerCAmelCase_ : Tuple = torch.arange(0 , 8 , dtype=torch.long).reshape(2 , -1)
lowerCAmelCase_ : List[str] = input_ids.reshape(2 * 4 , -1)
with torch.no_grad():
lowerCAmelCase_ : Optional[int] = model(input_ids=snake_case__ , decoder_input_ids=snake_case__).logits
if logits.shape != (8, 1, 20_48):
raise ValueError("Incorrect shape for logits")
# now construct the processor
lowerCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained("t5-base")
lowerCAmelCase_ : str = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left")
lowerCAmelCase_ : Optional[Any] = MusicgenProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__)
# set the appropriate bos/pad token ids
lowerCAmelCase_ : Any = 20_48
lowerCAmelCase_ : str = 20_48
# set other default generation config params
lowerCAmelCase_ : int = int(30 * audio_encoder.config.frame_rate)
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : List[Any] = 3.0
if pytorch_dump_folder is not None:
Path(snake_case__).mkdir(exist_ok=snake_case__)
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''')
model.save_pretrained(snake_case__)
processor.save_pretrained(snake_case__)
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''')
model.push_to_hub(snake_case__)
processor.push_to_hub(snake_case__)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
_lowercase = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,  # originally separate 4-bit / 8-bit checks, collapsed here
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
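# Generic sketch (ours) of the recursive module-swap pattern used above. The
# factory stands in for the bitsandbytes layer constructors so the example
# runs without a GPU or bitsandbytes installed, e.g.
# _swap_linears_with(model, lambda lin: nn.Identity()).
def _swap_linears_with(module: nn.Module, factory) -> None:
    for child_name, child in module.named_children():
        if isinstance(child, nn.Linear):
            setattr(module, child_name, factory(child))  # replace the leaf
        else:
            _swap_linears_with(child, factory)  # recurse into containers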
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
| 683 | 1 |
from collections import defaultdict
class __snake_case :
"""simple docstring"""
def __init__( self : Tuple ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowerCAmelCase_ : Dict = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(lowerCAmelCase__ ) )
]
lowerCAmelCase_ : Any = defaultdict(lowerCAmelCase__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowerCAmelCase_ : Union[str, Any] = (1 << len(lowerCAmelCase__ )) - 1
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int ,lowerCAmelCase__ : List[Any] ) -> str:
'''simple docstring'''
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't assign this task in the arrangement
lowerCAmelCase_ : List[Any] = self.count_ways_until(lowerCAmelCase__ ,task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) ,task_no + 1 )
# save the value.
lowerCAmelCase_ : Optional[int] = total_ways_util
return self.dp[mask][task_no]
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Tuple ) -> int:
'''simple docstring'''
for i in range(len(lowerCAmelCase__ ) ):
for j in task_performed[i]:
self.task[j].append(lowerCAmelCase__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 ,1 )
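# Worked illustration (ours) of the bitmask encoding used above: with 3
# people, mask 0b101 means persons 0 and 2 already hold a task; assigning a
# task to person p sets bit p, and the recursion succeeds once every bit of
# final_mask is set.
def _mask_demo() -> None:
    n_people = 3
    final_mask = (1 << n_people) - 1  # 0b111
    mask = 0b101  # persons 0 and 2 are busy
    assert mask | (1 << 1) == final_mask  # giving person 1 a task fills the mask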
if __name__ == "__main__":
_lowercase = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
_lowercase = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float32) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
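
# --- Usage sketch (illustrative addition, not part of the original file) ---
# Assuming the class above corresponds to the CLAP-style feature extractor
# shipped with transformers, a typical call looks like this (the checkpoint
# name and the printed shapes are examples, not guarantees):
#
#     import numpy as np
#     from transformers import ClapFeatureExtractor
#
#     extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
#     waveform = np.random.randn(48_000).astype(np.float32)  # ~1s of fake mono audio
#     features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#     print(features["input_features"].shape)  # batch of log-mel spectrogram views
#     print(features["is_longer"])             # [[False]] for clips shorter than 10s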
| 683 | 1 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""
    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format `<symbol0> <count0>` per line."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return
        lines = f.readlines()
        indices_start_line = self._load_meta(lines)
        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }
    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]
    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)
    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)
    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--biogpt_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help=(
            "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
            " bpecodes, etc."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
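
# --- Usage sketch (illustrative addition) ---
# Conversion is run from the command line against a fairseq dump directory that
# contains checkpoint.pt, dict.txt and bpecodes (paths are placeholders):
#
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path /path/to/biogpt_dump \
#         --pytorch_dump_folder_path /path/to/output
#
# The vocab rewriting step can be sanity-checked in isolation (all four special
# tokens must be present, since they are deleted and re-added):
#
#     >>> rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#     {'le': 5, 'er</w>': 7, '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}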
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
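
# --- Reference sketch (illustrative addition) ---
# The same odd-even transposition sort without processes or pipes; handy as a
# sequential baseline for checking the parallel version above.
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    for phase in range(len(arr)):
        start = phase % 2  # alternate between even and odd phases
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


# e.g. odd_even_transposition_sequential([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]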
| 683 | 1 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
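
# --- Usage sketch (illustrative addition) ---
# What these tests exercise, in plain form (comments only; assumes the hub
# checkpoint "funnel-transformer/small" is reachable):
#
#     from transformers import FunnelTokenizer, TFFunnelModel
#
#     tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#     model = TFFunnelModel.from_pretrained("funnel-transformer/small")
#     inputs = tokenizer("Hello world", return_tensors="tf")
#     outputs = model(**inputs)
#     print(outputs.last_hidden_state.shape)  # (1, seq_len, d_model)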
| 683 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
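
# --- Usage sketch (illustrative addition) ---
# Classic healthy/fever HMM example; a quick manual check of viterbi().
if __name__ == "__main__":
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))
    # expected: ['Healthy', 'Healthy', 'Fever']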
| 683 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
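
# --- Usage sketch (illustrative addition) ---
# Outside the Trainer, the same warm-started bert2bert model can be used for
# generation directly (comments only; requires hub access, and the token-id
# wiring mirrors the test above):
#
#     tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "prajjwal1/bert-tiny", "prajjwal1/bert-tiny"
#     )
#     model.config.decoder_start_token_id = tokenizer.cls_token_id
#     model.config.pad_token_id = tokenizer.pad_token_id
#     input_ids = tokenizer("some article text", return_tensors="pt").input_ids
#     summary_ids = model.generate(input_ids, max_length=32)
#     print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))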
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
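
# --- Usage sketch (illustrative addition) ---
# How the tool is typically driven (comments only; instantiating it downloads
# the microsoft/speecht5_tts checkpoint, the HiFi-GAN vocoder and the
# speaker-embedding dataset):
#
#     tool = TextToSpeechTool()
#     tool.setup()
#     audio = tool("Hello, this text is read out loud.")
#     # `audio` is a 1-D waveform tensor; SpeechT5 produces 16 kHz audio, so it
#     # can be written out with e.g. soundfile:
#     # sf.write("out.wav", audio.numpy(), samplerate=16_000)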
| 683 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : int=13 ,lowerCAmelCase__ : Union[str, Any]=7 ,lowerCAmelCase__ : int=True ,lowerCAmelCase__ : List[Any]=True ,lowerCAmelCase__ : List[str]=True ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : Any=99 ,lowerCAmelCase__ : Optional[Any]=32 ,lowerCAmelCase__ : Any=5 ,lowerCAmelCase__ : str=4 ,lowerCAmelCase__ : Any=37 ,lowerCAmelCase__ : Dict="gelu" ,lowerCAmelCase__ : Union[str, Any]=0.1 ,lowerCAmelCase__ : Dict=0.1 ,lowerCAmelCase__ : Tuple=5_12 ,lowerCAmelCase__ : Optional[Any]=16 ,lowerCAmelCase__ : List[Any]=2 ,lowerCAmelCase__ : List[str]=0.02 ,lowerCAmelCase__ : List[Any]=4 ,) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : int = parent
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : int = seq_length
lowerCAmelCase_ : Tuple = is_training
lowerCAmelCase_ : Any = use_attention_mask
lowerCAmelCase_ : List[Any] = use_token_type_ids
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : Optional[Any] = hidden_act
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : List[Any] = max_position_embeddings
lowerCAmelCase_ : Tuple = type_vocab_size
lowerCAmelCase_ : List[str] = type_sequence_label_size
lowerCAmelCase_ : Any = initializer_range
lowerCAmelCase_ : Dict = num_choices
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase_ : Optional[Any] = None
if self.use_attention_mask:
lowerCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Tuple = None
if self.use_token_type_ids:
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase_ : Dict = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCAmelCase__ ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self : Any ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = config_and_inputs
lowerCAmelCase_ : Any = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = True
UpperCamelCase_ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase_ ( self : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = FlaxBertModelTester(self )
@slow
def UpperCAmelCase_ ( self : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = FlaxBertModel.from_pretrained("bert-base-cased" )
lowerCAmelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase__ )
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
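
# --- Usage sketch (illustrative addition) ---
# The official script is usually invoked as:
#
#     python evaluate-v2.0.py data.json pred.json -o eval.json
#
# and the scoring primitives can be exercised without the full JSON files:
#
#     >>> compute_exact("The Cat", "the cat")     # normalization strips case/articles
#     1
#     >>> compute_f1("the cat sat", "cat sat down")  # precision 2/3, recall 1
#     0.8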
| 683 | 1 |
def valid_connection(graph, next_ver, curr_ind, path):
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph, path, curr_ind):
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph, start_index=0):
    # Initialize path with -1, indicating that we have not visited the vertices yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
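
# --- Usage sketch (illustrative addition) ---
if __name__ == "__main__":
    # 0 -> 1 -> 2 -> 4 -> 3 -> 0 is the only Hamiltonian cycle in this graph.
    example_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(example_graph))  # expected: [0, 1, 2, 4, 3, 0]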
| 683 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
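

# Sanity check added for illustration: 220 and 284 form the smallest
# amicable pair, so each equals the sum of the other's proper divisors.
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220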
if __name__ == "__main__":
    print(solution(int(input().strip())))
| 683 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
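

# Added reference sketch (the tested implementation lives in
# graphs.minimum_spanning_tree_kruskal): sort edges by weight and greedily
# keep each edge that connects two different components, tracked with a
# union-find structure. Function and variable names here are assumptions.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving keeps trees shallow
            u = parent[u]
        return u

    mst = []
    for u, v, weight in sorted(edges, key=lambda edge: edge[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:
            parent[root_u] = root_v
            mst.append([u, v, weight])
    return mst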
| 683 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
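# Illustrative sketch (added): the idea behind _LazyModule, expressed with a
# module-level __getattr__ (PEP 562). On first attribute access the relevant
# submodule is imported; until then importing the package stays cheap.
# Names below are assumptions for illustration, not the real implementation.
#
#   import importlib
#
#   _LAZY_ATTRS = {"Speech2TextConfig": ".configuration_speech_to_text"}
#
#   def __getattr__(name):
#       if name in _LAZY_ATTRS:
#           module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")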
| 683 | 1 |
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
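# Usage illustration (added): the function solves the mass-action law
# n * p = n_i**2 for the single missing quantity, e.g.
#   carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10)
#   returns ('hole_conc', 4.0)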
| 683 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
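

# Added illustration: the same greedy lowest-rank-first merge loop as
# self.bpe above, run on a toy vocabulary without the class machinery.
def _bpe_toy_example():
    ranks = {("l", "o"): 0, ("lo", "w"): 1, ("e", "r"): 2}
    word = ("l", "o", "w", "e", "r")
    while True:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: ranks.get(pair, float("inf")))
        if bigram not in ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word  # ('low', 'er')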
| 683 | 1 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowercase = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
for attribute in key.split("."):
lowerCAmelCase_ : str = getattr(snake_case__ , snake_case__)
if weight_type is not None:
lowerCAmelCase_ : int = getattr(snake_case__ , snake_case__).shape
else:
lowerCAmelCase_ : List[str] = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowerCAmelCase_ : int = value
elif weight_type == "weight_g":
lowerCAmelCase_ : int = value
elif weight_type == "weight_v":
lowerCAmelCase_ : Dict = value
elif weight_type == "bias":
lowerCAmelCase_ : Tuple = value
else:
lowerCAmelCase_ : int = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : Optional[Any] = fairseq_model.state_dict()
lowerCAmelCase_ : List[Any] = hf_model.feature_extractor
lowerCAmelCase_ : Any = hf_model.adapter
for name, value in fairseq_dict.items():
lowerCAmelCase_ : Any = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == "group" , )
lowerCAmelCase_ : Any = True
elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
load_adapter(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
lowerCAmelCase_ : List[Any] = True
if "*" in mapped_key:
lowerCAmelCase_ : Optional[int] = name.split(snake_case__)[0].split(".")[-2]
lowerCAmelCase_ : int = mapped_key.replace("*" , snake_case__)
if "weight_g" in name:
lowerCAmelCase_ : Any = "weight_g"
elif "weight_v" in name:
lowerCAmelCase_ : int = "weight_v"
elif "bias" in name:
lowerCAmelCase_ : Optional[int] = "bias"
elif "weight" in name:
lowerCAmelCase_ : str = "weight"
else:
lowerCAmelCase_ : Optional[Any] = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
continue
if not is_used:
unused_weights.append(snake_case__)
logger.warning(F'''Unused weights: {unused_weights}''')
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = full_name.split("conv_layers.")[-1]
lowerCAmelCase_ : Union[str, Any] = name.split(".")
lowerCAmelCase_ : Any = int(items[0])
lowerCAmelCase_ : Any = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowerCAmelCase_ : List[str] = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowerCAmelCase_ : str = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowerCAmelCase_ : List[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''')
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowerCAmelCase_ : Union[str, Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
else:
unused_weights.append(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : int = full_name.split("adaptor.")[-1]
lowerCAmelCase_ : Dict = name.split(".")
if items[1].isdigit():
lowerCAmelCase_ : Any = int(items[1])
else:
lowerCAmelCase_ : int = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'''
lowerCAmelCase_ : str = value
logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'''
                lowerCAmelCase_ : Union[str, Any] = value
                logger.info(F'''Adapter proj layer norm weight was initialized from {full_name}.''')
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'''
lowerCAmelCase_ : List[str] = value
logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''')
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'''
lowerCAmelCase_ : str = value
logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''')
elif isinstance(snake_case__ , snake_case__):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'''
lowerCAmelCase_ : Dict = value
logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''')
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'''
lowerCAmelCase_ : Dict = value
            logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''')
else:
unused_weights.append(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = emb.weight.shape
lowerCAmelCase_ : Optional[int] = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__)
lowerCAmelCase_ : Optional[int] = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
lowerCAmelCase_ : List[Any] = WavaVecaConfig.from_pretrained(
snake_case__ , add_adapter=snake_case__ , adapter_stride=snake_case__ , adapter_kernel_size=snake_case__ , use_auth_token=snake_case__ , output_hidden_size=snake_case__ , )
lowerCAmelCase_ : Dict = MBartConfig.from_pretrained(snake_case__)
# load model
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"config_yaml": config_yaml_path,
"data": "/".join(dict_path.split("/")[:-1]),
"w2v_path": checkpoint_path,
"load_pretrained_decoder_from": None,
} , )
lowerCAmelCase_ : List[Any] = model[0].eval()
# load feature extractor
lowerCAmelCase_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(snake_case__ , use_auth_token=snake_case__)
# set weights for wav2vec2 encoder
lowerCAmelCase_ : int = WavaVecaModel(snake_case__)
recursively_load_weights_wavaveca(model.encoder , snake_case__)
# load decoder weights
lowerCAmelCase_ : Tuple = MBartForCausalLM(snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Any = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=snake_case__)
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''')
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''')
lowerCAmelCase_ : int = SpeechEncoderDecoderModel(encoder=snake_case__ , decoder=snake_case__)
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Optional[int] = MBartaaTokenizer(snake_case__)
tokenizer.save_pretrained(snake_case__)
lowerCAmelCase_ : int = hf_wavavec.config.to_dict()
lowerCAmelCase_ : List[Any] = tokenizer.pad_token_id
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : Tuple = tokenizer.eos_token_id
lowerCAmelCase_ : Tuple = "mbart50"
lowerCAmelCase_ : Dict = "wav2vec2"
lowerCAmelCase_ : Any = tokenizer.eos_token_id
lowerCAmelCase_ : List[Any] = 25_00_04
lowerCAmelCase_ : Any = tokenizer.eos_token_id
lowerCAmelCase_ : Dict = SpeechEncoderDecoderConfig.from_dict(snake_case__)
hf_wavavec.save_pretrained(snake_case__)
feature_extractor.save_pretrained(snake_case__)
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=250004, type=int, help='''`decoder_start_token_id` of model config''')
_lowercase = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
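# Illustration (added): how the "*" placeholder in MAPPING above is resolved
# when renaming fairseq keys to HF keys. For a fairseq key matching "fc1":
#
#   name = "w2v_model.encoder.layers.3.fc1.weight"
#   mapped_key = "encoder.layers.*.feed_forward.intermediate_dense"
#   layer_index = name.split("fc1")[0].split(".")[-2]      # -> "3"
#   mapped_key.replace("*", layer_index)
#   # -> "encoder.layers.3.feed_forward.intermediate_dense"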
| 683 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def postorder(curr_node):
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def binary_search_tree():
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)
    # Prints all the elements of the list in order traversal
    print(t)
    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")
    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")
    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore
    for i in testlist:
        t.remove(i)
    print(t)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
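

# Added illustration: the sorted-order property that the k-th smallest
# helper above relies on — an inorder traversal of a binary search tree
# visits node values in ascending order.
def inorder_values(node):
    if node is None:
        return []
    return inorder_values(node.left) + [node.value] + inorder_values(node.right)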
| 683 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = DanceDiffusionPipeline
UpperCamelCase_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
UpperCamelCase_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
UpperCamelCase_ = False
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Tuple = UNetaDModel(
block_out_channels=(32, 32, 64) ,extra_in_channels=16 ,sample_size=5_12 ,sample_rate=1_60_00 ,in_channels=2 ,out_channels=2 ,flip_sin_to_cos=lowerCAmelCase__ ,use_timestep_embedding=lowerCAmelCase__ ,time_embedding_type="fourier" ,mid_block_type="UNetMidBlock1D" ,down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,)
lowerCAmelCase_ : int = IPNDMScheduler()
lowerCAmelCase_ : int = {
"unet": unet,
"scheduler": scheduler,
}
return components
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Tuple=0 ) -> Optional[int]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : int = DanceDiffusionPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = output.audios
lowerCAmelCase_ : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase_ : Dict = np.array([-0.7_265, 1.0_000, -0.8_388, 0.1_175, 0.9_498, -1.0_000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase_ ( self : int ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = torch_device
lowerCAmelCase_ : str = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
lowerCAmelCase_ : Dict = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Any = torch.manual_seed(0 )
lowerCAmelCase_ : str = pipe(generator=lowerCAmelCase__ ,num_inference_steps=1_00 ,audio_length_in_s=4.096 )
lowerCAmelCase_ : Tuple = output.audios
lowerCAmelCase_ : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase_ : Dict = np.array([-0.0_192, -0.0_231, -0.0_318, -0.0_059, 0.0_002, -0.0_020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = torch_device
lowerCAmelCase_ : Tuple = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" ,torch_dtype=torch.floataa )
lowerCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = torch.manual_seed(0 )
lowerCAmelCase_ : Any = pipe(generator=lowerCAmelCase__ ,num_inference_steps=1_00 ,audio_length_in_s=4.096 )
lowerCAmelCase_ : str = output.audios
lowerCAmelCase_ : str = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase_ : Union[str, Any] = np.array([-0.0_367, -0.0_488, -0.0_771, -0.0_525, -0.0_444, -0.0_341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
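# Minimal usage sketch (added; assumes the harmonai/maestro-150k checkpoint
# referenced in the tests above is available locally or on the Hub):
#
#   import torch
#   from diffusers import DanceDiffusionPipeline
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audios = pipe(generator=torch.manual_seed(0), num_inference_steps=100,
#                 audio_length_in_s=4.096).audios  # shape (1, 2, num_samples)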
| 683 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def test_trie():
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests():
    assert test_trie()


def main():
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
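

# Added illustration: the prefix-splitting logic at the heart of the radix
# node above, as a standalone function. It returns the common prefix, the
# leftover node prefix, and the leftover word.
def match_prefix(prefix, word):
    x = 0
    for q, w in zip(prefix, word):
        if q != w:
            break
        x += 1
    return prefix[:x], prefix[x:], word[x:]


# match_prefix("banana", "band") -> ('ban', 'ana', 'd')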
| 683 | 1 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = OpenAIGPTTokenizer
UpperCamelCase_ = OpenAIGPTTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCAmelCase_ : Optional[int] = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Optional[int] = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowerCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) )
with open(self.merges_file ,"w" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
return "lower newer", "lower newer"
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
lowerCAmelCase_ : Union[str, Any] = "lower"
lowerCAmelCase_ : List[str] = ["low", "er</w>"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = tokens + ["<unk>"]
lowerCAmelCase_ : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[str]=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __snake_case ( snake_case__ ):
"""simple docstring"""
pass
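# Note added for illustration: with the toy merges above ("l o", "lo w",
# "e r</w>"), "lower" is first split into characters ("l","o","w","e","r</w>")
# and then merged step by step:
#   l o -> lo ; lo w -> low ; e r</w> -> er</w>
# which yields exactly the ["low", "er</w>"] the test asserts.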
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {
'''yjernite/retribert-base-uncased''': 512,
}
_lowercase = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = RetriBertTokenizer
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : int ,lowerCAmelCase__ : Optional[Any]=None ,lowerCAmelCase__ : List[Any]=None ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : Dict="[UNK]" ,lowerCAmelCase__ : List[str]="[SEP]" ,lowerCAmelCase__ : Tuple="[PAD]" ,lowerCAmelCase__ : int="[CLS]" ,lowerCAmelCase__ : Dict="[MASK]" ,lowerCAmelCase__ : List[str]=True ,lowerCAmelCase__ : Dict=None ,**lowerCAmelCase__ : List[str] ,) -> Dict:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : List[Any] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : Tuple = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : Any = do_lower_case
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : int=None ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
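

# Illustration (added): the sentence-pair token-type ids produced by the
# helper above — zeros cover [CLS] A [SEP], ones cover B [SEP]. 101 and 102
# are the conventional BERT [CLS]/[SEP] ids (assumed here for concreteness).
_cls, _sep = [101], [102]
_ids_a, _ids_b = [7, 8, 9], [4, 5]
_token_type_ids = len(_cls + _ids_a + _sep) * [0] + len(_ids_b + _sep) * [1]
assert _token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]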
| 683 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 1 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 'ssube/stable-diffusion-x4-upscaler-onnx'
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : int = floats_tensor((1, 3, 1_28, 1_28) ,rng=random.Random(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = torch.manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : int = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_ddpm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
| 683 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer  # MT5 reuses the T5 tokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mta": ["MTaConfig", "MTaOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mta"] = [
        "MTaEncoderModel",
        "MTaForConditionalGeneration",
        "MTaForQuestionAnswering",
        "MTaModel",
        "MTaPreTrainedModel",
        "MTaStack",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mta"] = ["TFMTaEncoderModel", "TFMTaForConditionalGeneration", "TFMTaModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mta"] = ["FlaxMTaEncoderModel", "FlaxMTaForConditionalGeneration", "FlaxMTaModel"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MTaTokenizer": MTaTokenizer, "MTaTokenizerFast": MTaTokenizerFast},
        module_spec=__spec__,
    )
| 683 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
    """Convert an integer-valued number to a hexadecimal string such as '0xff'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
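# Quick cross-check sketch against the built-in hex(); zero is skipped because
# the routine above renders it as "0x" rather than "0x0".
for _value in (9, 255, 4096, -65535):
    assert decimal_to_hexadecimal(_value) == hex(_value)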
| 683 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinva": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinvaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinva"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinvaForImageClassification",
        "SwinvaForMaskedImageModeling",
        "SwinvaModel",
        "SwinvaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 683 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
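# Small usage sketch: instantiate the config and inspect the derived stage
# names used for backbone feature alignment (argument values are illustrative).
_config = FocalNetConfig(depths=[2, 2], out_features=["stage1"])
print(_config.stage_names)  # ['stem', 'stage1', 'stage2']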
| 683 |
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
| 683 | 1 |
from copy import deepcopy
class FenwickTree:
    """0-indexed Fenwick (binary indexed) tree for point updates and prefix sums."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        """Build the tree from `arr` in O(n)."""
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        """Recover the underlying array in O(n)."""
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        """Add `value` at `index` in O(log n)."""
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        """Set the element at `index` to `value` in O(log n)."""
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of elements in [0, right) in O(log n)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        """Sum of elements in [left, right) in O(log n)."""
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        """Binary search over the tree: largest index reachable with prefix sum <= value, or -1."""
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
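# Usage sketch for the 0-based Fenwick tree above: point updates plus
# prefix/range sums in O(log n) each.
_fw = FenwickTree(arr=[1, 3, 0, 5, 2])
assert _fw.prefix(3) == 4       # 1 + 3 + 0
assert _fw.query(1, 4) == 8     # 3 + 0 + 5
_fw.add(2, 10)                  # array becomes [1, 3, 10, 5, 2]
assert _fw.query(1, 4) == 18
assert _fw.get_array() == [1, 3, 10, 5, 2]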
| 683 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 1 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float):
    # Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float):
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float):
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
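# Worked example of Graham's law via the ratio helper above: hydrogen
# (M = 2.016 g/mol) effuses roughly 3.98x faster than oxygen (M = 31.998 g/mol).
print(effusion_ratio(2.016, 31.998))  # ~3.984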
| 683 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: BST order on `value`, heap order on the random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` is <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
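# Quick usage sketch: in-order traversal of a treap is sorted regardless of
# the random heap priorities assigned to each node.
_root = None
for _v in (5, 3, 8, 1):
    _root = insert(_root, _v)
inorder(_root)  # prints 1,3,5,8,
print()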
| 683 | 1 |
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
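# Sanity-check sketch on a 2x2 symmetric matrix whose spectrum is known in
# closed form: eig([[2, 1], [1, 2]]) = {1, 3}, so the dominant eigenvalue is 3.
_lam, _vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
assert abs(_lam - 3.0) < 1e-6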
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f"funnel-transformer/{name}": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f"funnel-transformer/{name}": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 683 | 1 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum subarray sum in O(n)."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or start a new one at `num`
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
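# The allow_empty_subarrays flag only matters for all-negative input: an empty
# subarray (sum 0) then beats every non-empty one.
assert max_subarray_sum([-8, -3, -6]) == -3
assert max_subarray_sum([-8, -3, -6], allow_empty_subarrays=True) == 0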
| 683 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 683 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the variance-exploding SDE sampler."""

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
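# Hedged usage sketch for the pipeline above; the checkpoint id
# "google/ncsnpp-celebahq-256" is assumed to host a compatible UNet/scheduler
# pair, and full sampling is slow without a GPU.
_sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
_image = _sde_ve(num_inference_steps=2000).images[0]
_image.save("sde_ve_generated_image.png")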
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
float(snake_case__)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
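# A minimal, de-obfuscated sketch of the merge loop above, with explicit
# names in place of the hidden assignment targets; the helper names
# merge_once / prime_implicants_of are illustrative, not from the original.
def merge_once(a, b):
    # merge two equal-length binary strings differing in at most one position
    merged, diffs = [], 0
    for x, y in zip(a, b):
        if x != y:
            diffs += 1
            merged.append("_")
        else:
            merged.append(x)
    return "".join(merged) if diffs <= 1 else None
def prime_implicants_of(binary):
    # repeatedly merge implicant pairs; terms that never merge are prime
    pi = []
    while True:
        covered = [False] * len(binary)
        merged_terms = set()
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                m = merge_once(binary[i], binary[j])
                if m is not None:
                    covered[i] = covered[j] = True
                    merged_terms.add(m)
        pi.extend(t for i, t in enumerate(binary) if not covered[i])
        if not merged_terms:
            return pi
        binary = sorted(merged_terms)
# e.g. prime_implicants_of(["0000", "0001", "0011"]) -> ["000_", "00_1"]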
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
    bs = (
        list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = set()
lowerCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ : Union[str, Any] = char
return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
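# Quick illustration of the byte-level BPE plumbing above, using the
# canonical name bytes_to_unicode as at the call site in __init__: every
# byte 0-255 maps to a printable character, so raw bytes round-trip through
# the tokenizer losslessly. Kept as a comment since the defs here are renamed.
#   b2u = bytes_to_unicode()
#   assert len(b2u) == 256 and len(set(b2u.values())) == 256
#   u2b = {u: b for b, u in b2u.items()}
#   data = "café".encode("utf-8")
#   encoded = "".join(b2u[b] for b in data)
#   assert bytes(u2b[c] for c in encoded).decode("utf-8") == "café"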
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
    lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_4bit
    lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
    lowerCAmelCase_ : Optional[int] = load_in_4bit
    lowerCAmelCase_ : List[str] = load_in_8bit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
            param.to(torch.float32)
            if param.dtype != torch.float32:
                lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
                lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
                if param is not None:
                    param.to(torch.float32)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
            snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_8bit_bnb=load_in_8bit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    lowerCAmelCase_ : Tuple = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    lowerCAmelCase_ : Dict = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
        if isinstance(snake_case__ , bnb.nn.Linear4bit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
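# Minimal sketch of the recursive layer-swap idea used by
# _replace_with_bnb_layers above, independent of bitsandbytes: walk the
# module tree and replace nn.Linear children via a user-supplied factory,
# skipping configured names. Illustrative only; not accelerate's public API.
def _replace_linears_sketch(model, factory, skip, prefix=""):
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            # swap the layer in place; the factory builds the replacement
            setattr(model, name, factory(child))
        else:
            _replace_linears_sketch(child, factory, skip, full_name)
    return model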
| 683 | 1 |
from __future__ import annotations
from functools import lru_cache
from math import ceil
_lowercase = 100
_lowercase = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00)
def UpperCamelCase ( snake_case__):
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase_ : set[int] = set()
lowerCAmelCase_ : int
lowerCAmelCase_ : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime):
ret.add(sub * prime)
return ret
def UpperCamelCase ( snake_case__ = 50_00):
for number_to_partition in range(1 , snake_case__):
if len(partition(snake_case__)) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"{solution() = }")
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
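# Hedged usage sketch of the extractor above (upstream class name is
# ClapFeatureExtractor; defaults as in __init__: 48 kHz, max_length_s = 10).
# With a 12 s clip and truncation="fusion", four mel views are stacked:
#   import numpy as np
#   fe = ClapFeatureExtractor()
#   audio = np.random.randn(48_000 * 12).astype(np.float32)
#   feats = fe(audio, sampling_rate=48_000, truncation="fusion", return_tensors="np")
#   feats["input_features"].shape  # expected (1, 4, max_length // hop_length + 1, 64)
#   feats["is_longer"]             # [[True]] since the clip exceeds 10 s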
| 683 | 1 |
from __future__ import annotations
from collections import namedtuple
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = namedtuple("result" , "name value")
if (voltage, current, power).count(0) != 1:
raise ValueError("Only one argument must be 0")
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system")
elif voltage == 0:
return result("voltage" , power / current)
elif current == 0:
return result("current" , power / voltage)
elif power == 0:
return result("power" , float(round(abs(voltage * current) , 2)))
else:
raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
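# Usage sketch (upstream name electric_power; exactly one of the three
# arguments must be 0 and the function solves for it):
#   electric_power(voltage=2, current=4, power=0)
#   -> result(name='power', value=8.0)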
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowercase = Lock()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case__)
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCAmelCase_ : Optional[Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCAmelCase_ : Any = min(snake_case__ , snake_case__)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case__)
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCAmelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCAmelCase_ : Dict = max(snake_case__ , snake_case__)
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : int = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe())
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCAmelCase_ : Tuple = Pipe()
lowerCAmelCase_ : Optional[int] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ))
lowerCAmelCase_ : int = temp_rs
lowerCAmelCase_ : List[Any] = temp_rr
for i in range(1 , len(snake_case__) - 1):
lowerCAmelCase_ : Dict = Pipe()
lowerCAmelCase_ : List[str] = Pipe()
process_array_.append(
Process(
target=snake_case__ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ))
lowerCAmelCase_ : Dict = temp_rs
lowerCAmelCase_ : Optional[Any] = temp_rr
process_array_.append(
Process(
target=snake_case__ , args=(
len(snake_case__) - 1,
arr[len(snake_case__) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case__) - 1],
) , ))
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case__)):
lowerCAmelCase_ : Union[str, Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = list(range(10 , 0 , -1))
print("Initial List")
print(*snake_case__)
lowerCAmelCase_ : Tuple = odd_even_transposition(snake_case__)
print("Sorted List\n")
print(*snake_case__)
if __name__ == "__main__":
main()
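# For contrast with the process/pipe version above: the same odd-even
# transposition sort as a minimal single-threaded reference sketch
# (illustrative; not part of the original module).
def odd_even_sort(arr):
    lst = list(arr)
    n = len(lst)
    for phase in range(n):
        # even phases compare pairs (0,1),(2,3),...; odd phases (1,2),(3,4),...
        for i in range(phase % 2, n - 1, 2):
            if lst[i] > lst[i + 1]:
                lst[i], lst[i + 1] = lst[i + 1], lst[i]
    return lst
# odd_even_sort([10, 9, 8, 7, 6, 5, 4, 3, 2, 1]) -> [1, 2, ..., 10]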
| 683 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : str = 1
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : List[Any] = (32, 32)
lowerCAmelCase_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
return model
@property
def UpperCAmelCase_ ( self : Tuple ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
return model
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = RobertaSeriesConfig(
hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_06 ,)
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
def extract(*lowerCAmelCase__ : Optional[Any] ,**lowerCAmelCase__ : str ):
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : int = torch.ones([0] )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[Any] = self.dummy_cond_unet
lowerCAmelCase_ : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
lowerCAmelCase_ : str = self.dummy_vae
lowerCAmelCase_ : List[Any] = self.dummy_text_encoder
lowerCAmelCase_ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase_ : List[Any] = 77
lowerCAmelCase_ : Union[str, Any] = self.dummy_image.to(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : str = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
lowerCAmelCase_ : List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
lowerCAmelCase_ : Any = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : List[str] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
lowerCAmelCase_ : Dict = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=lowerCAmelCase__ ,)
lowerCAmelCase_ : Union[str, Any] = output.images
lowerCAmelCase_ : str = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
lowerCAmelCase_ : Optional[Any] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,guidance_scale=6.0 ,num_inference_steps=2 ,output_type="np" ,image=lowerCAmelCase__ ,return_dict=lowerCAmelCase__ ,)[0]
lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
lowerCAmelCase_ : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ : List[Any] = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.dummy_cond_unet
lowerCAmelCase_ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = self.dummy_vae
lowerCAmelCase_ : Any = self.dummy_text_encoder
lowerCAmelCase_ : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase_ : Tuple = 77
lowerCAmelCase_ : Optional[int] = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
lowerCAmelCase_ : Dict = unet.half()
lowerCAmelCase_ : Any = vae.half()
lowerCAmelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase_ : List[str] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,vae=lowerCAmelCase__ ,text_encoder=lowerCAmelCase__ ,tokenizer=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,feature_extractor=self.dummy_extractor ,)
lowerCAmelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor ,do_normalize=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "A painting of a squirrel eating a burger"
lowerCAmelCase_ : str = torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = alt_pipe(
[prompt] ,generator=lowerCAmelCase__ ,num_inference_steps=2 ,output_type="np" ,image=lowerCAmelCase__ ,).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" ,"This test requires a GPU" )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase_ : Dict = init_image.resize((7_60, 5_04) )
lowerCAmelCase_ : Union[str, Any] = "BAAI/AltDiffusion"
lowerCAmelCase_ : str = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : Tuple = "A fantasy landscape, trending on artstation"
lowerCAmelCase_ : List[str] = torch.manual_seed(0 )
lowerCAmelCase_ : str = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="np" ,)
lowerCAmelCase_ : Union[str, Any] = output.images[0]
lowerCAmelCase_ : int = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
lowerCAmelCase_ : Tuple = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase_ : str = init_image.resize((7_68, 5_12) )
lowerCAmelCase_ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowerCAmelCase_ : List[Any] = "BAAI/AltDiffusion"
lowerCAmelCase_ : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ ,)
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : List[str] = "A fantasy landscape, trending on artstation"
lowerCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = pipe(
prompt=lowerCAmelCase__ ,image=lowerCAmelCase__ ,strength=0.75 ,guidance_scale=7.5 ,generator=lowerCAmelCase__ ,output_type="np" ,)
lowerCAmelCase_ : Any = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
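# Hedged usage sketch mirroring the slow test above (upstream class name is
# AltDiffusionImg2ImgPipeline; "Imga" is the dataset's digit renaming):
#   from diffusers import AltDiffusionImg2ImgPipeline
#   pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape, trending on artstation",
#              image=init_image, strength=0.75, guidance_scale=7.5)
#   out.images[0].save("fantasy_landscape.png")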
| 683 |
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
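# Worked example for the Viterbi routine above (upstream name viterbi; every
# def in this file shares the renamed UpperCamelCase, so this stays a comment).
# Classic healthy/fever HMM:
#   observations = ["normal", "cold", "dizzy"]
#   states = ["Healthy", "Fever"]
#   start_p = {"Healthy": 0.6, "Fever": 0.4}
#   trans_p = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#              "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emit_p = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#   viterbi(observations, states, start_p, trans_p, emit_p)
#   -> ['Healthy', 'Healthy', 'Fever']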
| 683 | 1 |
from manim import *
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Rectangle(height=0.5 ,width=0.5 )
lowerCAmelCase_ : int = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCAmelCase_ : str = Rectangle(height=0.25 ,width=0.25 )
lowerCAmelCase_ : Optional[Any] = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : int = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : int = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Union[str, Any] = VGroup(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : int = Text("CPU" ,font_size=24 )
lowerCAmelCase_ : Dict = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = [mem.copy() for i in range(4 )]
lowerCAmelCase_ : Optional[int] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : List[Any] = Text("GPU" ,font_size=24 )
lowerCAmelCase_ : Tuple = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : Any = [mem.copy() for i in range(6 )]
lowerCAmelCase_ : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Optional[int] = Text("Model" ,font_size=24 )
lowerCAmelCase_ : str = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Tuple = []
for i, rect in enumerate(lowerCAmelCase__ ):
lowerCAmelCase_ : Tuple = fill.copy().set_fill(lowerCAmelCase__ ,opacity=0.8 )
target.move_to(lowerCAmelCase__ )
model_arr.append(lowerCAmelCase__ )
lowerCAmelCase_ : str = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ ,*lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : Tuple = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase_ : List[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : str = VGroup(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0 )
lowerCAmelCase_ : Any = Text("Disk" ,font_size=24 )
lowerCAmelCase_ : Tuple = Group(lowerCAmelCase__ ,lowerCAmelCase__ ).arrange(lowerCAmelCase__ ,buff=0.5 ,aligned_edge=lowerCAmelCase__ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase_ : Optional[int] = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' ,font_size=18 ,)
blue_text.next_to(lowerCAmelCase__ ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
lowerCAmelCase_ : int = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = Square(0.3 )
input.set_fill(lowerCAmelCase__ ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,lowerCAmelCase__ ,buff=0.5 )
self.play(Write(lowerCAmelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=lowerCAmelCase__ ,buff=0.02 )
self.play(MoveToTarget(lowerCAmelCase__ ) )
self.play(FadeOut(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = Arrow(start=lowerCAmelCase__ ,end=lowerCAmelCase__ ,color=lowerCAmelCase__ ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,lowerCAmelCase__ ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowerCAmelCase_ : Optional[int] = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ,run_time=3 ) )
lowerCAmelCase_ : Any = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(lowerCAmelCase__ ) ,Circumscribe(model_arr[0] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(model_cpu_arr[0] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(gpu_rect[0] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowerCAmelCase_ : Tuple = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,lowerCAmelCase__ ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowerCAmelCase_ : Optional[int] = AnimationGroup(
FadeOut(lowerCAmelCase__ ,run_time=0.5 ) ,MoveToTarget(lowerCAmelCase__ ,run_time=0.5 ) ,FadeIn(lowerCAmelCase__ ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(lowerCAmelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowerCAmelCase_ : Any = 0.7
self.play(
Circumscribe(model_arr[i] ,**lowerCAmelCase__ ) ,Circumscribe(cpu_left_col_base[i] ,**lowerCAmelCase__ ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(gpu_rect[0] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(model_arr[i + 1] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(cpu_left_col_base[-1] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,Circumscribe(gpu_rect[0] ,color=lowerCAmelCase__ ,**lowerCAmelCase__ ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowerCAmelCase_ : Any = a_c
lowerCAmelCase_ : Dict = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(lowerCAmelCase__ ) ,FadeOut(lowerCAmelCase__ ,run_time=0.5 ) ,)
lowerCAmelCase_ : Union[str, Any] = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ,run_time=3 ) ,MoveToTarget(lowerCAmelCase__ ) )
self.wait()
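# Hedged mini-example (not part of the original scene): the whole animation above
# is built from one recurring pattern -- generate_target / MoveToTarget to slide a
# weight block between slots, plus Circumscribe to highlight the slots involved.
# `_PatternDemo` is a made-up name; render with `manim -ql <file>.py _PatternDemo`.
class _PatternDemo(Scene):
    def construct(self):
        block = Square(0.5).set_fill(BLUE, opacity=0.8)
        slot = Rectangle(height=0.6, width=0.6).shift(RIGHT * 3)
        self.add(block, slot)
        block.generate_target()  # snapshot of the mobject that we can reposition
        block.target.move_to(slot)  # destination of the animation
        self.play(Circumscribe(slot), MoveToTarget(block))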
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]

    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)

        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")

            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)

        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
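# Hedged usage sketch (PipelineTool instances are callable; SpeechT5 emits 16 kHz audio):
#
#     tool = TextToSpeechTool()
#     waveform = tool("Hello, how are you today?")   # 1-D torch tensor
#     # import soundfile as sf; sf.write("speech.wav", waveform.numpy(), samplerate=16000)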
| 683 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    label = np.array(label)
    pred_label = np.array(pred_label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
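# Hedged worked example (tiny made-up maps, computed by hand) of what
# intersect_and_union returns per class:
#
#     pred  = np.array([[0, 1], [1, 1]])
#     label = np.array([[0, 0], [1, 1]])
#     # class 0: intersection 1, union 1 + 2 - 1 = 2  -> IoU 0.5
#     # class 1: intersection 2, union 3 + 2 - 2 = 3  -> IoU ~0.667
#     # mean IoU = (0.5 + 0.667) / 2 ~ 0.583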
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
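# Hedged worked example of the token-level F1 above (values computed by hand):
#   gold = "The cat sat"  -> normalized tokens ["cat", "sat"]  (article "the" dropped)
#   pred = "cat sat down" -> normalized tokens ["cat", "sat", "down"]
#   common = 2, precision = 2/3, recall = 2/2 = 1 -> F1 = 2 * (2/3 * 1) / (2/3 + 1) = 0.8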
def get_raw_scores(dataset, preds):
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 1 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
def choose_first(snake_case__ , snake_case__=False):
assert isinstance(snake_case__ , snake_case__)
if len(snake_case__) == 1:
lowerCAmelCase_ : Tuple = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowerCAmelCase_ : Tuple = {k: [a[k]] for k in a}
if len(a["start_token"]) > 0:
break
return a
lowerCAmelCase_ : Union[str, Any] = {"id": example["id"]}
lowerCAmelCase_ : Optional[int] = example["annotations"]
lowerCAmelCase_ : Union[str, Any] = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowerCAmelCase_ : Any = ["yes"] if 1 in yes_no_answer else ["no"]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : Tuple = ["<cls>"]
else:
lowerCAmelCase_ : Tuple = ["short"]
lowerCAmelCase_ : Dict = choose_first(annotation["short_answers"])
if len(out["start_token"]) == 0:
# answer will be long if short is not available
lowerCAmelCase_ : str = ["long"]
lowerCAmelCase_ : Union[str, Any] = choose_first(annotation["long_answer"] , is_long_answer=snake_case__)
lowerCAmelCase_ : Any = []
answer.update(snake_case__)
# disregard some samples
if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
lowerCAmelCase_ : Optional[Any] = True
else:
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : str = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , snake_case__) for k in cols):
raise ValueError("Issue in ID" , example["id"])
return answer
def get_context_and_ans(example, assertion=False):
lowerCAmelCase_ : int = _get_single_answer(snake_case__)
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase_ : Dict = example["document"]["tokens"]
lowerCAmelCase_ : str = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
return {
"context": " ".join(snake_case__),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowerCAmelCase_ : Optional[int] = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. [10] == 10
lowerCAmelCase_ : List[str] = example["document"]["tokens"]
lowerCAmelCase_ : int = answer["start_token"]
lowerCAmelCase_ : Union[str, Any] = answer["end_token"]
lowerCAmelCase_ : str = []
for i in range(len(doc["token"])):
if not doc["is_html"][i]:
context.append(doc["token"][i])
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowerCAmelCase_ : Any = " ".join(context[start_token:end_token])
# checking above code
if assertion:
lowerCAmelCase_ : List[Any] = doc["is_html"][answer["start_token"] : answer["end_token"]]
lowerCAmelCase_ : Any = doc["token"][answer["start_token"] : answer["end_token"]]
lowerCAmelCase_ : Optional[int] = " ".join([old[i] for i in range(len(snake_case__)) if not is_html[i]])
if new != old:
print("ID:" , example["id"])
print("New:" , snake_case__ , end="\n")
print("Old:" , snake_case__ , end="\n\n")
return {
"context": " ".join(snake_case__),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
# overlap will be of doc_stride - q_len
lowerCAmelCase_ : Any = get_context_and_ans(snake_case__ , assertion=snake_case__)
lowerCAmelCase_ : Union[str, Any] = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowerCAmelCase_ : Dict = tokenizer(example["question"]["text"] , out["context"]).input_ids
lowerCAmelCase_ : Optional[Any] = input_ids.index(tokenizer.sep_token_id) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Any = []
lowerCAmelCase_ : List[str] = input_ids[:q_len]
lowerCAmelCase_ : Union[str, Any] = range(snake_case__ , len(snake_case__) , max_length - doc_stride)
for i in doc_start_indices:
lowerCAmelCase_ : List[str] = i + max_length - q_len
lowerCAmelCase_ : int = input_ids[i:end_index]
inputs.append(q_indices + slice)
category.append(answer["category"][0])
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(snake_case__),
"end_token": [-1_00] * len(snake_case__),
"category": category,
},
}
lowerCAmelCase_ : Optional[Any] = out["context"].split()
lowerCAmelCase_ : List[str] = splitted_context[answer["end_token"]]
lowerCAmelCase_ : Optional[int] = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]]) , add_special_tokens=snake_case__ , ).input_ids)
lowerCAmelCase_ : int = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]]) , add_special_tokens=snake_case__).input_ids)
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowerCAmelCase_ : Union[str, Any] = len(tokenizer(snake_case__ , add_special_tokens=snake_case__).input_ids)
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowerCAmelCase_ : Dict = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
lowerCAmelCase_ : Any = answer["start_token"]
lowerCAmelCase_ : List[Any] = answer["end_token"]
if assertion:
lowerCAmelCase_ : Optional[int] = tokenizer.decode(snake_case__)
if answer["span"] != new:
print("ISSUE IN TOKENIZATION")
print("OLD:" , answer["span"])
print("NEW:" , snake_case__ , end="\n\n")
if len(snake_case__) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowerCAmelCase_ : str = input_ids[:q_len]
lowerCAmelCase_ : int = range(snake_case__ , len(snake_case__) , max_length - doc_stride)
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : str = []
lowerCAmelCase_ : Optional[Any] = [] # null, yes, no, long, short
for i in doc_start_indices:
lowerCAmelCase_ : List[Any] = i + max_length - q_len
lowerCAmelCase_ : Any = input_ids[i:end_index]
inputs.append(q_indices + slice)
assert len(inputs[-1]) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowerCAmelCase_ : List[str] = start_token - i + q_len
lowerCAmelCase_ : Dict = end_token - i + q_len
answers_category.append(answer["category"][0]) # ["short"] -> "short"
else:
lowerCAmelCase_ : Optional[int] = -1_00
lowerCAmelCase_ : str = -1_00
answers_category.append("null")
lowerCAmelCase_ : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(snake_case__)
answers_end_token.append(snake_case__)
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"])
print("New:" , tokenizer.decode(snake_case__))
print("Old:" , tokenizer.decode(snake_case__) , end="\n\n")
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples without an answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null"-category samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
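# Hedged sanity check of the striding arithmetic above (hand-worked numbers):
# with q_len = 16 question tokens, MAX_LENGTH = 4096 and DOC_STRIDE = 2048,
# windows start at range(16, n_tokens, 4096 - 2048) = 16, 2064, 4112, ...
# Each window carries 4096 - 16 = 4080 context tokens, so adjacent windows
# overlap by 4080 - 2048 = 2032 tokens, i.e. doc_stride - q_len.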
| 683 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (all divisors except ``n``)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Sum of all amicable numbers below ``limit`` (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
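# Hedged worked example: 220 and 284 form the classic amicable pair, while a
# perfect number such as 6 is excluded by the `!=` check in `solution`.
assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
assert sum_of_divisors(6) == 6  # perfect, hence never counted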
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 683 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
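# Hedged usage sketch (mirrors the public `transformers.ConvBertConfig` API, so
# defaults can be inspected without downloading any weights):
#
#     cfg = ConvBertConfig(conv_kernel_size=9, head_ratio=2)
#     print(cfg.hidden_size, cfg.conv_kernel_size)   # 768 9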
| 683 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
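# Hedged note on the pattern above: _LazyModule defers the heavy submodule imports
# until an attribute is first accessed, so e.g.
#
#     import transformers
#     transformers.Speech2TextConfig   # the configuration module is imported only here
#
# while the try/except blocks simply omit names whose optional backends
# (sentencepiece, the speech extra, TensorFlow, PyTorch) are not installed.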
| 683 | 1 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any ``k`` consecutive elements of ``array``."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    current_sum = sum(array[:k])
    max_sum = current_sum
    for i in range(len(array) - k):
        # slide the window one step: drop array[i], pick up array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
_lowercase = [randint(-1000, 1000) for i in range(100)]
_lowercase = randint(0, 110)
print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 683 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
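# Hedged mini-example of the two module-level helpers above (hand-checked):
#   bytes_to_unicode() maps every byte 0-255 to a printable character; printable
#   ASCII maps to itself, e.g. bytes_to_unicode()[ord("A")] == "A".
#   get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")} -- the adjacent symbol
#   pairs whose bpe_ranks the tokenizer consults to pick the next merge.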
| 683 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = ['input_ids', 'attention_mask']
UpperCamelCase_ = DistilBertTokenizer
def __init__( self : Optional[int] ,lowerCAmelCase__ : Tuple=None ,lowerCAmelCase__ : List[str]=None ,lowerCAmelCase__ : Dict=True ,lowerCAmelCase__ : Any="[UNK]" ,lowerCAmelCase__ : int="[SEP]" ,lowerCAmelCase__ : str="[PAD]" ,lowerCAmelCase__ : List[Any]="[CLS]" ,lowerCAmelCase__ : Dict="[MASK]" ,lowerCAmelCase__ : Any=True ,lowerCAmelCase__ : Any=None ,**lowerCAmelCase__ : Tuple ,) -> List[str]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : List[Any] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : Optional[int] = do_lower_case
lowerCAmelCase_ : Dict = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : Union[str, Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by wrapping one or two sequences in [CLS]/[SEP] tokens."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create a token-type-id mask: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
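# Minimal usage sketch for the fast tokenizer above (the checkpoint name is an
# assumption; note that model_input_names above omits token_type_ids, so they
# are not returned by default even though the helper can build them):
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   enc = tokenizer("first segment", "second segment")
#   sorted(enc.keys())  ->  ["attention_mask", "input_ids"]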
| 683 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Node | None = None # Added in order to delete a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = []
if curr_node is not None:
lowerCAmelCase_ : Dict = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(snake_case__)
# Prints all the elements of the list in order traversal
print(snake_case__)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(snake_case__)
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
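# Quick usage sketch of the tree above, mirroring the demo in main(); method
# names follow the intent of the obfuscated identifiers (insert takes *values,
# the kth-smallest helper takes (k, node) and relies on inorder being sorted):
#   t = BinarySearchTree()
#   t.insert(8, 3, 6, 1, 10, 14, 13, 4, 7)
#   list(t.traversal_tree())   # preorder: 8, 3, 1, 6, 4, 7, 10, 14, 13
#   t.kth_smallest(3, t.root)  # -> 4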
| 683 | 1 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __snake_case ( snake_case__ , snake_case__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : int = 1_28 ,lowerCAmelCase__ : int = 2_56 ,lowerCAmelCase__ : float = 2_000.0 ,lowerCAmelCase__ : int = 7_68 ,lowerCAmelCase__ : int = 12 ,lowerCAmelCase__ : int = 12 ,lowerCAmelCase__ : int = 64 ,lowerCAmelCase__ : int = 20_48 ,lowerCAmelCase__ : float = 0.1 ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : int = nn.Sequential(
nn.Linear(lowerCAmelCase__ ,d_model * 4 ,bias=lowerCAmelCase__ ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=lowerCAmelCase__ ) ,nn.SiLU() ,)
lowerCAmelCase_ : List[str] = nn.Embedding(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : str = False
lowerCAmelCase_ : List[Any] = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = nn.Dropout(p=lowerCAmelCase__ )
lowerCAmelCase_ : Any = nn.ModuleList()
for lyr_num in range(lowerCAmelCase__ ):
# FiLM conditional T5 decoder
lowerCAmelCase_ : Optional[int] = DecoderLayer(d_model=lowerCAmelCase__ ,d_kv=lowerCAmelCase__ ,num_heads=lowerCAmelCase__ ,d_ff=lowerCAmelCase__ ,dropout_rate=lowerCAmelCase__ )
self.decoders.append(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = TaLayerNorm(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = nn.Dropout(p=lowerCAmelCase__ )
lowerCAmelCase_ : int = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCAmelCase_ : List[str] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype )
lowerCAmelCase_ : Dict = self.conditioning_emb(lowerCAmelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCAmelCase_ : str = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCAmelCase_ : List[Any] = torch.broadcast_to(
torch.arange(lowerCAmelCase__ ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,)
lowerCAmelCase_ : str = self.position_encoding(lowerCAmelCase__ )
lowerCAmelCase_ : int = self.continuous_inputs_projection(lowerCAmelCase__ )
inputs += position_encodings
lowerCAmelCase_ : Optional[Any] = self.dropout(lowerCAmelCase__ )
# decoder: No padding present.
lowerCAmelCase_ : Union[str, Any] = torch.ones(
decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCAmelCase_ : Optional[Any] = [(x, self.encoder_decoder_mask(lowerCAmelCase__ ,lowerCAmelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCAmelCase_ : Optional[int] = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 )
lowerCAmelCase_ : Optional[int] = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 )
for lyr in self.decoders:
lowerCAmelCase_ : Dict = lyr(
lowerCAmelCase__ ,conditioning_emb=lowerCAmelCase__ ,encoder_hidden_states=lowerCAmelCase__ ,encoder_attention_mask=lowerCAmelCase__ ,)[0]
lowerCAmelCase_ : int = self.decoder_norm(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = self.post_dropout(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.spec_out(lowerCAmelCase__ )
return spec_out
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Optional[Any]=1e-6 ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase__ ,d_kv=lowerCAmelCase__ ,num_heads=lowerCAmelCase__ ,dropout_rate=lowerCAmelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase__ ,d_kv=lowerCAmelCase__ ,num_heads=lowerCAmelCase__ ,dropout_rate=lowerCAmelCase__ ,layer_norm_epsilon=lowerCAmelCase__ ,) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase__ ,d_ff=lowerCAmelCase__ ,dropout_rate=lowerCAmelCase__ ,layer_norm_epsilon=lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : List[Any]=None ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : Dict=None ,lowerCAmelCase__ : Optional[int]=None ,) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.layer[0](
lowerCAmelCase__ ,conditioning_emb=lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,)
if encoder_hidden_states is not None:
lowerCAmelCase_ : List[Any] = torch.where(encoder_attention_mask > 0 ,0 ,-1e1_0 ).to(
encoder_hidden_states.dtype )
lowerCAmelCase_ : Tuple = self.layer[1](
lowerCAmelCase__ ,key_value_states=lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,)
# Apply Film Conditional Feed Forward layer
lowerCAmelCase_ : Optional[Any] = self.layer[-1](lowerCAmelCase__ ,lowerCAmelCase__ )
return (hidden_states,)
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Any ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Optional[Any] = TaLayerNorm(lowerCAmelCase__ )
lowerCAmelCase_ : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = Attention(query_dim=lowerCAmelCase__ ,heads=lowerCAmelCase__ ,dim_head=lowerCAmelCase__ ,out_bias=lowerCAmelCase__ ,scale_qk=lowerCAmelCase__ )
lowerCAmelCase_ : str = nn.Dropout(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[Any]=None ,lowerCAmelCase__ : List[Any]=None ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = self.layer_norm(lowerCAmelCase__ )
if conditioning_emb is not None:
lowerCAmelCase_ : Optional[int] = self.FiLMLayer(lowerCAmelCase__ ,lowerCAmelCase__ )
# Self-attention block
lowerCAmelCase_ : Any = self.attention(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = hidden_states + self.dropout(lowerCAmelCase__ )
return hidden_states
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Dict ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Tuple = Attention(query_dim=lowerCAmelCase__ ,heads=lowerCAmelCase__ ,dim_head=lowerCAmelCase__ ,out_bias=lowerCAmelCase__ ,scale_qk=lowerCAmelCase__ )
lowerCAmelCase_ : str = TaLayerNorm(lowerCAmelCase__ ,eps=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = nn.Dropout(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[str]=None ,lowerCAmelCase__ : int=None ,) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.layer_norm(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = self.attention(
lowerCAmelCase__ ,encoder_hidden_states=lowerCAmelCase__ ,attention_mask=attention_mask.squeeze(1 ) ,)
lowerCAmelCase_ : int = hidden_states + self.dropout(lowerCAmelCase__ )
return layer_output
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : List[str] = TaDenseGatedActDense(d_model=lowerCAmelCase__ ,d_ff=lowerCAmelCase__ ,dropout_rate=lowerCAmelCase__ )
lowerCAmelCase_ : Any = TaFiLMLayer(in_features=d_model * 4 ,out_features=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = TaLayerNorm(lowerCAmelCase__ ,eps=lowerCAmelCase__ )
lowerCAmelCase_ : Any = nn.Dropout(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[Any]=None ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : str = self.layer_norm(lowerCAmelCase__ )
if conditioning_emb is not None:
lowerCAmelCase_ : Optional[int] = self.film(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = self.DenseReluDense(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = hidden_states + self.dropout(lowerCAmelCase__ )
return hidden_states
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Optional[int] = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCAmelCase_ : Any = nn.Linear(lowerCAmelCase__ ,lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCAmelCase_ : str = nn.Dropout(lowerCAmelCase__ )
lowerCAmelCase_ : int = NewGELUActivation()
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : List[str] = self.act(self.wi_a(lowerCAmelCase__ ) )
lowerCAmelCase_ : Any = self.wi_a(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = hidden_gelu * hidden_linear
lowerCAmelCase_ : Optional[Any] = self.dropout(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.wo(lowerCAmelCase__ )
return hidden_states
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Union[str, Any]=1e-6 ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : List[str] = nn.Parameter(torch.ones(lowerCAmelCase__ ) )
lowerCAmelCase_ : str = eps
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=lowerCAmelCase__ )
lowerCAmelCase_ : Any = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCAmelCase_ : Any = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __snake_case ( nn.Module ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(lowerCAmelCase__ ,3.0 )) ))
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : str ) -> List[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.Linear(lowerCAmelCase__ ,out_features * 2 ,bias=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.scale_bias(lowerCAmelCase__ )
lowerCAmelCase_ , lowerCAmelCase_ : int = torch.chunk(lowerCAmelCase__ ,2 ,-1 )
lowerCAmelCase_ : str = x * (1 + scale) + shift
return x
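# Shape sketch for the FiLM conditioning defined above (tensor sizes are
# illustrative assumptions): TaFiLMLayer projects the conditioning embedding
# to per-channel (scale, shift) pairs and applies x * (1 + scale) + shift.
#   film = TaFiLMLayer(in_features=4 * 768, out_features=768)
#   x = torch.randn(2, 100, 768)         # (batch, target_length, d_model)
#   cond = torch.randn(2, 1, 4 * 768)    # pooled conditioning embedding
#   film(x, cond).shape  ->  torch.Size([2, 100, 768])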
| 683 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = "banana bananas bandana band apple all beast".split()
lowerCAmelCase_ : List[Any] = RadixNode()
root.insert_many(snake_case__)
assert all(root.find(snake_case__) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = RadixNode()
lowerCAmelCase_ : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__)
print("Words:" , snake_case__)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
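# Usage sketch of the radix tree above, mirroring test_trie():
#   root = RadixNode()
#   root.insert_many("banana band".split())
#   root.find("band")      -> True
#   root.find("ban")       -> False   # stored only as a shared prefix node
#   root.delete("banana")  -> True    # "band" remains findable afterwards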
| 683 | 1 |
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    # Square repeatedly until the starting point exceeds a, giving a safe
    # initial guess to the right of the true root.
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with the Newton-Raphson iteration on f(x) = x**2 - a."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
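# Worked example of the iteration above for a = 2 (digits truncated):
#   get_initial_point(2) returns 4.0, then the Newton steps give
#   4.0 -> 2.25 -> 1.56944 -> 1.42189 -> 1.41423 -> 1.4142135...,
#   converging to math.sqrt(2) well within the default tolerance.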
| 683 |
from __future__ import annotations
def carrier_concentration(electron_conc, hole_conc, intrinsic_conc):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
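# Worked examples of the mass-action law n * p = n_i**2 applied above:
#   carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
#   -> ("electron_conc", 25.0)    # 200**2 / 1600
#   carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
#   -> ("intrinsic_conc", 50.0)   # (25 * 100) ** 0.5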
| 683 | 1 |
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
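# Minimal sketch: any object exposing process(sample) satisfies FilterType, so
# a pass-through "filter" (an all-pass impulse response) exercises both plots:
#   class Identity:
#       def process(self, sample: float) -> float:
#           return sample
#   show_frequency_response(Identity(), 48000)  # flat 0 dB magnitude
#   show_phase_response(Identity(), 48000)      # zero phase shift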
| 683 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
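# Sketch of what the lazy structure above buys (assuming the usual transformers
# packaging): config classes import cheaply, while the torch-backed modeling
# module is only imported on first attribute access.
#   from transformers import GitConfig        # no torch import triggered
#   from transformers import GitForCausalLM   # imports modeling_git lazily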
| 683 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is the canonical name; the metadata flag keeps it in asdict() output.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
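# Hedged usage sketch (dataset and column names are illustrative; this assumes
# datasets' Dataset.prepare_for_task accepts a TaskTemplate instance):
#   task = Summarization(text_column="article", summary_column="highlights")
#   ds = ds.prepare_for_task(task)  # columns renamed to "text" / "summary"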
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
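# Typical invocation sketch (flag names follow TensorFlowBenchmarkArguments in
# recent transformers releases; exact availability depends on the version):
#   python run_benchmark_tf.py --models bert-base-uncased \
#       --batch_sizes 8 --sequence_lengths 128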
| 683 | 1 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
    """Convert an integer-valued number to a hexadecimal string such as '0x1a'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
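# Worked example of the divmod loop above for decimal = 26:
#   divmod(26, 16) -> (1, 10)  => hexadecimal = "a"
#   divmod(1, 16)  -> (0, 1)   => hexadecimal = "1a"
#   result: "0x1a"; decimal_to_hexadecimal(-26) -> "-0x1a"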
| 683 |
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal):
    """Convert an integer-valued number to a hexadecimal string such as '0x1a'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase = logging.get_logger(__name__)
# General docstring
_lowercase = '''RegNetConfig'''
# Base docstring
_lowercase = '''facebook/regnet-y-040'''
_lowercase = [1, 1088, 7, 7]
# Image classification docstring
_lowercase = '''facebook/regnet-y-040'''
_lowercase = '''tabby, tabby cat'''
_lowercase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int = 3 ,lowerCAmelCase__ : int = 1 ,lowerCAmelCase__ : int = 1 ,lowerCAmelCase__ : Optional[str] = "relu" ,) -> str:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.Convad(
lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=lowerCAmelCase__ ,stride=lowerCAmelCase__ ,padding=kernel_size // 2 ,groups=lowerCAmelCase__ ,bias=lowerCAmelCase__ ,)
lowerCAmelCase_ : str = nn.BatchNormad(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.convolution(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.normalization(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = self.activation(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,lowerCAmelCase__ : RegNetConfig ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : List[str] = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
lowerCAmelCase_ : Optional[int] = config.num_channels
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
lowerCAmelCase_ : Dict = self.embedder(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int = 2 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Union[str, Any] = nn.Convad(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ,stride=lowerCAmelCase__ ,bias=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = nn.BatchNormad(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = self.convolution(lowerCAmelCase__ )
lowerCAmelCase_ : str = self.normalization(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ) -> Any:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
lowerCAmelCase_ : Optional[int] = nn.Sequential(
nn.Convad(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ) ,nn.Sigmoid() ,)
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = self.pooler(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.attention(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = hidden_state * attention
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowerCAmelCase__ : RegNetConfig ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int = 1 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Any = in_channels != out_channels or stride != 1
lowerCAmelCase_ : str = max(1 ,out_channels // config.groups_width )
lowerCAmelCase_ : List[Any] = (
RegNetShortCut(lowerCAmelCase__ ,lowerCAmelCase__ ,stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : Tuple = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,stride=lowerCAmelCase__ ,groups=lowerCAmelCase__ ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ,activation=lowerCAmelCase__ ) ,)
lowerCAmelCase_ : Union[str, Any] = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : str = hidden_state
lowerCAmelCase_ : str = self.layer(lowerCAmelCase__ )
lowerCAmelCase_ : str = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowerCAmelCase_ : Optional[int] = self.activation(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : int ,lowerCAmelCase__ : RegNetConfig ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int = 1 ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : Optional[int] = in_channels != out_channels or stride != 1
lowerCAmelCase_ : Union[str, Any] = max(1 ,out_channels // config.groups_width )
lowerCAmelCase_ : Optional[int] = (
RegNetShortCut(lowerCAmelCase__ ,lowerCAmelCase__ ,stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,stride=lowerCAmelCase__ ,groups=lowerCAmelCase__ ,activation=config.hidden_act ) ,RegNetSELayer(lowerCAmelCase__ ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(lowerCAmelCase__ ,lowerCAmelCase__ ,kernel_size=1 ,activation=lowerCAmelCase__ ) ,)
lowerCAmelCase_ : Dict = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : str = hidden_state
lowerCAmelCase_ : List[str] = self.layer(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
lowerCAmelCase_ : List[str] = self.activation(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,lowerCAmelCase__ : RegNetConfig ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int = 2 ,lowerCAmelCase__ : int = 2 ,) -> int:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : List[Any] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
lowerCAmelCase_ : Union[str, Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,stride=lowerCAmelCase__ ,) ,*[layer(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ) for _ in range(depth - 1 )] ,)
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.layers(lowerCAmelCase__ )
return hidden_state
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,lowerCAmelCase__ : RegNetConfig ) -> Dict:
'''simple docstring'''
super().__init__()
lowerCAmelCase_ : List[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCAmelCase__ ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
lowerCAmelCase_ : List[Any] = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ ,config.depths[1:] ):
self.stages.append(RegNetStage(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,depth=lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Tensor ,lowerCAmelCase__ : bool = False ,lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
lowerCAmelCase_ : Dict = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase_ : Optional[Any] = hidden_states + (hidden_state,)
lowerCAmelCase_ : int = stage_module(lowerCAmelCase__ )
if output_hidden_states:
lowerCAmelCase_ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase__ ,hidden_states=lowerCAmelCase__ )
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = RegNetConfig
UpperCamelCase_ = 'regnet'
UpperCamelCase_ = 'pixel_values'
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Dict ) -> str:
'''simple docstring'''
if isinstance(lowerCAmelCase__ ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode="fan_out" ,nonlinearity="relu" )
elif isinstance(lowerCAmelCase__ ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : List[str]=False ) -> str:
'''simple docstring'''
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = value
_lowercase = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_lowercase = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __snake_case ( snake_case__ ):
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : List[str] ) -> int:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = config
lowerCAmelCase_ : Union[str, Any] = RegNetEmbeddings(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = RegNetEncoder(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=lowerCAmelCase__ ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Tensor ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
lowerCAmelCase_ : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Optional[int] = self.embedder(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = self.encoder(
lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ ,return_dict=lowerCAmelCase__ )
lowerCAmelCase_ : str = encoder_outputs[0]
lowerCAmelCase_ : int = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ ,pooler_output=lowerCAmelCase__ ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , snake_case__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __snake_case ( snake_case__ ):
"""simple docstring"""
def __init__( self : Any ,lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowerCAmelCase_ : str = config.num_labels
lowerCAmelCase_ : Dict = RegNetModel(lowerCAmelCase__ )
# classification head
lowerCAmelCase_ : Optional[Any] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=lowerCAmelCase__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : Optional[torch.FloatTensor] = None ,lowerCAmelCase__ : Optional[torch.LongTensor] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[bool] = None ,) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : int = self.regnet(lowerCAmelCase__ ,output_hidden_states=lowerCAmelCase__ ,return_dict=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase_ : Tuple = self.classifier(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase_ : List[str] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase_ : Dict = "single_label_classification"
else:
lowerCAmelCase_ : str = "multi_label_classification"
if self.config.problem_type == "regression":
lowerCAmelCase_ : Dict = MSELoss()
if self.num_labels == 1:
lowerCAmelCase_ : str = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
lowerCAmelCase_ : List[Any] = loss_fct(lowerCAmelCase__ ,lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase_ : Tuple = CrossEntropyLoss()
lowerCAmelCase_ : List[Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase_ : int = BCEWithLogitsLoss()
lowerCAmelCase_ : Optional[int] = loss_fct(lowerCAmelCase__ ,lowerCAmelCase__ )
if not return_dict:
lowerCAmelCase_ : Dict = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ ,logits=lowerCAmelCase__ ,hidden_states=outputs.hidden_states )
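# Minimal forward-pass sketch for the classification model above (random
# weights; class names follow the upstream RegNet API rather than the
# obfuscated aliases used in this listing):
#   config = RegNetConfig(num_labels=10)
#   model = RegNetForImageClassification(config)
#   pixel_values = torch.randn(1, 3, 224, 224)
#   model(pixel_values).logits.shape  ->  torch.Size([1, 10])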
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
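# Sketch of attaching the mixin above to a concrete tool test (the tool name
# and loader are assumptions based on the transformers tools API):
#   @is_tool_test
#   class TranslationToolTest(unittest.TestCase, ToolTesterMixin):
#       def setUp(self):
#           self.tool = load_tool("translation")
#           self.tool.setup()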
| 683 | 1 |
def rank_of_matrix(matrix):
    """Return the rank of `matrix` via Gaussian elimination (modifies it in place)."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    # A while loop is used so the row pointer can genuinely stay on the same
    # row after a swap; decrementing a for-loop variable has no effect in Python.
    row = 0
    while row < rank:
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]
            # Stay on the same row to re-process it after the swap/column copy.
    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
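# Worked examples for the elimination above:
#   rank_of_matrix([[1, 2], [2, 4]])  -> 1   # second row is 2 * first row
#   rank_of_matrix([[1, 0], [0, 1]])  -> 2   # identity matrix, full rank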
| 683 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
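# A hypothetical test consuming the fixture above (not part of the original file):
#
#   import datasets
#
#   def test_dummy_dataset_loads(dataset_loading_script_dir):
#       ds = datasets.load_dataset(dataset_loading_script_dir, split="train")
#       assert "tokens" in ds.column_names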
| 683 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to a sorted list for stable output
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
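# Illustrative invocation (hypothetical paths; the script file name is assumed):
#   python utils/check_tf_ops.py --saved_model_path my_model/saved_model.pb --opset 12 --strict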
| 683 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
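# Illustrative sketch of the `truncate_before_pattern` API exercised in the slow
# test above (model name from that test; `ids` is a hypothetical list of token ids):
#
#   tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#   text = tokenizer.decode(ids, truncate_before_pattern=["^#", "^'''", "\n\n\n"])
#   # decoding stops right before the first regex match, trimming trailing junk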
| 683 | 1 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Tuple=2 ,lowerCAmelCase__ : Dict=56 ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : List[str]=True ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : int=True ,lowerCAmelCase__ : str=99 ,lowerCAmelCase__ : List[Any]=32 ,lowerCAmelCase__ : Optional[int]=2 ,lowerCAmelCase__ : Union[str, Any]=2 ,lowerCAmelCase__ : Tuple=7 ,lowerCAmelCase__ : List[str]="gelu_new" ,lowerCAmelCase__ : Dict=0.1 ,lowerCAmelCase__ : Optional[int]=0.1 ,lowerCAmelCase__ : List[str]=5_12 ,lowerCAmelCase__ : Tuple=16 ,lowerCAmelCase__ : Union[str, Any]=2 ,lowerCAmelCase__ : Optional[Any]=0.02 ,lowerCAmelCase__ : int=4 ,lowerCAmelCase__ : int="block_sparse" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : int=False ,lowerCAmelCase__ : str=2 ,lowerCAmelCase__ : Dict=3 ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : Tuple = seq_length
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : List[Any] = use_attention_mask
lowerCAmelCase_ : Any = use_token_type_ids
lowerCAmelCase_ : Union[str, Any] = use_labels
lowerCAmelCase_ : List[str] = vocab_size
lowerCAmelCase_ : Dict = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : Optional[int] = intermediate_size
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : int = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = type_vocab_size
lowerCAmelCase_ : Dict = type_sequence_label_size
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : Any = num_choices
lowerCAmelCase_ : Tuple = rescale_embeddings
lowerCAmelCase_ : str = attention_type
lowerCAmelCase_ : Optional[int] = use_bias
lowerCAmelCase_ : Any = block_size
lowerCAmelCase_ : int = num_random_blocks
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase_ : Optional[int] = None
if self.use_attention_mask:
lowerCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
lowerCAmelCase_ : Tuple = BigBirdConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowerCAmelCase__ ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,block_size=self.block_size ,num_random_blocks=self.num_random_blocks ,use_bias=self.use_bias ,rescale_embeddings=self.rescale_embeddings ,)
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = config_and_inputs
lowerCAmelCase_ : Optional[Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase_ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
super().test_hidden_states_output()
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase_ : int = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = model_class(lowerCAmelCase__ )
@jax.jit
def model_jitted(lowerCAmelCase__ : Dict ,lowerCAmelCase__ : Optional[int]=None ,**lowerCAmelCase__ : Optional[int] ):
return model(input_ids=lowerCAmelCase__ ,attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ )
with self.subTest("JIT Enabled" ):
lowerCAmelCase_ : Union[str, Any] = model_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
lowerCAmelCase_ : Union[str, Any] = model_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) ,len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ ,lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape ,output.shape )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : List[Any]=1e-5 ,lowerCAmelCase__ : int="outputs" ,lowerCAmelCase__ : Union[str, Any]=None ) -> Tuple:
'''simple docstring'''
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
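# Illustrative pattern (generic JAX usage, not specific to this file): the test
# above compares jitted vs. eager execution, which in plain JAX looks like:
#
#   import jax
#
#   @jax.jit
#   def fn(x):
#       return x * 2
#
#   with jax.disable_jit():
#       eager = fn(3)   # runs op-by-op, easier to debug
#   jitted = fn(3)      # compiled with XLA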
| 683 |
from __future__ import annotations
from random import random
class Node:
    """A treap node: BST-ordered by `value`, heap-ordered by the random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root, value):
    """Split the treap into (nodes with value <= `value`, nodes with value > `value`)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left, right):
    """Merge two treaps; every value in `left` must be <= every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root, value):
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root, value):
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root):
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root, args):
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main():
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
def method_1(boundary, steps):
    # "extended trapezoidal rule":
    # int(f) ~= h/2 * (f(x0) + 2*f(x1) + ... + 2*f(x_{n-1}) + f(xn))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
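# Accuracy note (a quick check, not from the original file): for f(x) = x**2 on
# [0, 1] the exact integral is 1/3 ~ 0.3333, and with steps = 10 the trapezoidal
# estimate comes out near 0.335. Be aware that make_points() stops on the
# condition `x < b - h` while accumulating `x += h`, so whether the last interior
# point is included can depend on floating-point rounding; iterating over
# `range(1, int(steps))` and computing `a + i * h` would be more robust.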
if __name__ == "__main__":
main()
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 2
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : int="<sep>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : List[str]="<cls>" ,lowerCAmelCase__ : Optional[int]="<mask>" ,lowerCAmelCase__ : Union[str, Any]="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]="##" ,**lowerCAmelCase__ : int ,) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,clean_text=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,wordpieces_prefix=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[int] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : int = do_lower_case
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : str = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
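# Illustrative note (not from this file): Funnel marks the [CLS] position with
# token type id 2 (`cls_token_type_id = 2` above), where BERT-style models use 0:
#
#   tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   tokenizer("hello", "world")["token_type_ids"]  # [2, 0, ..., 0, 1, ..., 1]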
| 683 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
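# Illustrative CLI use via fire (hypothetical script name and config kwargs):
#   python save_randomly_initialized.py t5-small /tmp/t5-tiny-random --d_model=64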
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 683 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag('''IGNORE_RESULT''')

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
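# Example of the custom flag in a doctest (illustrative only; any registered
# option flag can be used as a `# doctest:` directive by name):
#
#   >>> train_model()  # doctest: +IGNORE_RESULT
#   <output here is not compared against the expected text>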
| 683 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
SPIECE_UNDERLINE = '''▁'''
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 'left'
UpperCamelCase_ = XLNetTokenizer
def __init__( self : Dict ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : int=False ,lowerCAmelCase__ : List[Any]=True ,lowerCAmelCase__ : int=False ,lowerCAmelCase__ : str="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Dict="<unk>" ,lowerCAmelCase__ : Optional[int]="<sep>" ,lowerCAmelCase__ : int="<pad>" ,lowerCAmelCase__ : Optional[Any]="<cls>" ,lowerCAmelCase__ : Union[str, Any]="<mask>" ,lowerCAmelCase__ : Tuple=["<eop>", "<eod>"] ,**lowerCAmelCase__ : Dict ,) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,remove_space=lowerCAmelCase__ ,keep_accents=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,additional_special_tokens=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Union[str, Any] = 3
lowerCAmelCase_ : List[str] = do_lower_case
lowerCAmelCase_ : Tuple = remove_space
lowerCAmelCase_ : List[str] = keep_accents
lowerCAmelCase_ : Optional[Any] = vocab_file
lowerCAmelCase_ : Tuple = False if not self.vocab_file else True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = [self.sep_token_id]
lowerCAmelCase_ : Any = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file ,lowerCAmelCase__ )
return (out_vocab_file,)
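# Illustrative note (not from this file): this tokenizer appends its special
# tokens at the END of the sequence (token_ids + sep + cls above) and pads on
# the left (padding_side = "left"), mirroring how XLNet was pretrained:
#
#   tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#   tokenizer("hello world")["input_ids"]  # [..., <sep> id, <cls> id]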
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1, string2):
    """Merge two minterm strings if they differ in at most one position."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary):
    """Repeatedly merge adjacent minterms to obtain the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # the pair merges: mark both terms and keep the combined implicant
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable, minterms):
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1, string2, count):
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart, prime_implicants):
    temp = []
    select = [0] * len(chart)
    # pick essential prime implicants: columns covered by exactly one row
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants, binary):
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main():
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)  # minterms must be integers for the binary conversion below
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
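# A non-interactive sketch of the same pipeline (example minterms chosen here,
# not from the original file). For 3 variables and minterms {0, 1, 2, 5}:
def _qm_demo():
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    prime_implicants = check(binary)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential = selection(chart, prime_implicants)
    assert set(prime_implicants) == {"00_", "0_0", "_01"}
    assert set(essential) == {"0_0", "_01"}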
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 683 | 1 |
import functools
def min_distance_up_bottom(word1, word2):
    """Levenshtein distance, computed top-down with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1, index2):
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
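# A small check of min_distance_up_bottom (name assumed for the fixed function above):
def _edit_distance_demo():
    assert min_distance_up_bottom("intention", "execution") == 5  # classic example
    assert min_distance_up_bottom("", "abc") == 3  # three insertions
    assert min_distance_up_bottom("kitten", "kitten") == 0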
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
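# Illustrative usage sketch (upstream this function is accelerate's
# `load_and_quantize_model`; BnbQuantizationConfig is real, exact kwargs assumed):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   model = load_and_quantize_model(
#       empty_model, bnb_config, weights_location="path/to/checkpoint", device_map="auto"
#   )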
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ):
lowerCAmelCase_ : str = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase_ : Optional[int] = []
current_key_name.append(snake_case__)
if isinstance(snake_case__ , nn.Linear) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase_ : Optional[int] = ".".join(snake_case__)
lowerCAmelCase_ : List[str] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase_ : List[Any] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Tuple = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase_ : Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False")
lowerCAmelCase_ : List[str] = module.weight.data
if module.bias is not None:
lowerCAmelCase_ : Any = module.bias.data
bnb_module.requires_grad_(snake_case__)
setattr(snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = True
if len(list(module.children())) > 0:
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def UpperCamelCase ( snake_case__):
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase_ : List[Any] = deepcopy(snake_case__) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase_ : Dict = find_tied_parameters(snake_case__)
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : List[str] = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
lowerCAmelCase_ : Optional[Any] = sum(snake_case__ , [])
lowerCAmelCase_ : List[Any] = len(snake_case__) > 0
# Check if it is a base model
lowerCAmelCase_ : List[str] = False
if hasattr(snake_case__ , "base_model_prefix"):
lowerCAmelCase_ : Tuple = not hasattr(snake_case__ , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase_ : Union[str, Any] = list(model.named_children())
lowerCAmelCase_ : Optional[int] = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase_ : Any = set(snake_case__) - set(snake_case__)
lowerCAmelCase_ : Tuple = list(set(snake_case__)) + list(snake_case__)
# remove ".weight" from the keys
lowerCAmelCase_ : List[str] = [".weight", ".bias"]
lowerCAmelCase_ : Tuple = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase_ : str = name.replace(snake_case__ , "")
filtered_module_names.append(snake_case__)
return filtered_module_names
def UpperCamelCase ( snake_case__):
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit):
return True
return False
def UpperCamelCase ( snake_case__):
return next(parameter.parameters()).device
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__)
lowerCAmelCase_ : str = param_name
lowerCAmelCase_ : Tuple = model
if "." in tensor_name:
lowerCAmelCase_ : Dict = tensor_name.split(".")
for split in splits[:-1]:
lowerCAmelCase_ : Any = getattr(snake_case__ , snake_case__)
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''')
lowerCAmelCase_ : Union[str, Any] = new_module
lowerCAmelCase_ : Any = splits[-1]
# offload weights
lowerCAmelCase_ : List[Any] = False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__)
if hasattr(module._parameters[tensor_name] , "SCB"):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__)
offload_weight(snake_case__ , param_name.replace("weight" , "SCB") , snake_case__ , index=snake_case__)
set_module_tensor_to_device(snake_case__ , snake_case__ , "meta" , dtype=snake_case__ , value=torch.empty(*param.size()))
| 683 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
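    # Note (added commentary): the fusion tensor returned above has shape
    # (4, chunk_frames, num_mel_bins): one globally shrunk view of the full
    # spectrogram stacked with random crops from its front, middle and back.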
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features', 'is_longer']
def __init__( self : Optional[int] ,lowerCAmelCase__ : List[Any]=64 ,lowerCAmelCase__ : Any=4_80_00 ,lowerCAmelCase__ : Optional[Any]=4_80 ,lowerCAmelCase__ : List[str]=10 ,lowerCAmelCase__ : List[Any]=10_24 ,lowerCAmelCase__ : Union[str, Any]=0.0 ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : float = 0 ,lowerCAmelCase__ : float = 1_40_00 ,lowerCAmelCase__ : int = None ,lowerCAmelCase__ : str = "fusion" ,lowerCAmelCase__ : str = "repeatpad" ,**lowerCAmelCase__ : Union[str, Any] ,) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = top_db
lowerCAmelCase_ : str = truncation
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : str = fft_window_size
lowerCAmelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCAmelCase_ : Dict = hop_length
lowerCAmelCase_ : Any = max_length_s
lowerCAmelCase_ : int = max_length_s * sampling_rate
lowerCAmelCase_ : Optional[int] = sampling_rate
lowerCAmelCase_ : int = frequency_min
lowerCAmelCase_ : Optional[Any] = frequency_max
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm=lowerCAmelCase__ ,mel_scale="htk" ,)
lowerCAmelCase_ : List[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=lowerCAmelCase__ ,min_frequency=lowerCAmelCase__ ,max_frequency=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : Dict ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : int = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = spectrogram(
lowerCAmelCase__ ,window_function(self.fft_window_size ,"hann" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=lowerCAmelCase__ ,log_mel="dB" ,)
return log_mel_spectrogram.T
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase_ : List[Any] = [0]
# randomly choose index for each part
lowerCAmelCase_ : str = np.random.choice(ranges[0] )
lowerCAmelCase_ : Optional[Any] = np.random.choice(ranges[1] )
lowerCAmelCase_ : Any = np.random.choice(ranges[2] )
lowerCAmelCase_ : str = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase_ : Optional[Any] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase_ : List[str] = torch.tensor(mel[None, None, :] )
lowerCAmelCase_ : List[Any] = torch.nn.functional.interpolate(
lowerCAmelCase__ ,size=[chunk_frames, 64] ,mode="bilinear" ,align_corners=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCAmelCase_ : str = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : np.array ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase_ : List[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase_ : str = len(lowerCAmelCase__ ) - max_length
lowerCAmelCase_ : Any = np.random.randint(0 ,overflow + 1 )
lowerCAmelCase_ : Dict = waveform[idx : idx + max_length]
lowerCAmelCase_ : List[str] = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase_ : Tuple = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : str = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase_ : List[str] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase_ : Dict = np.stack([mel, mel, mel, mel] ,axis=0 )
lowerCAmelCase_ : int = False
else:
lowerCAmelCase_ : str = self._random_mel_fusion(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase_ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase_ : List[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : int = np.stack(np.tile(lowerCAmelCase__ ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase_ : Optional[Any] = int(max_length / len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = np.stack(np.tile(lowerCAmelCase__ ,lowerCAmelCase__ ) )
lowerCAmelCase_ : List[Any] = np.pad(lowerCAmelCase__ ,(0, max_length - waveform.shape[0]) ,mode="constant" ,constant_values=0 )
if truncation == "fusion":
lowerCAmelCase_ : int = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters )
lowerCAmelCase_ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
lowerCAmelCase_ : str = self._np_extract_fbank_features(lowerCAmelCase__ ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : int ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : str = None ,lowerCAmelCase__ : Optional[str] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,**lowerCAmelCase__ : List[Any] ,) -> BatchFeature:
'''simple docstring'''
lowerCAmelCase_ : List[str] = truncation if truncation is not None else self.truncation
lowerCAmelCase_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Dict = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : Dict = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : Tuple = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : Any = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray(lowerCAmelCase__ )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase_ : Optional[Any] = [
self._get_input_mel(lowerCAmelCase__ ,max_length if max_length else self.nb_max_samples ,lowerCAmelCase__ ,lowerCAmelCase__ )
for waveform in raw_speech
]
lowerCAmelCase_ : str = []
lowerCAmelCase_ : str = []
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase__ )
is_longer.append(lowerCAmelCase__ )
if truncation == "fusion" and sum(lowerCAmelCase__ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase_ : Any = np.random.randint(0 ,len(lowerCAmelCase__ ) )
lowerCAmelCase_ : Dict = True
if isinstance(input_mel[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : Optional[int] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase_ : List[Any] = [[longer] for longer in is_longer]
lowerCAmelCase_ : Optional[Any] = {"input_features": input_mel, "is_longer": is_longer}
lowerCAmelCase_ : Dict = BatchFeature(lowerCAmelCase__ )
if return_tensors is not None:
lowerCAmelCase_ : List[str] = input_features.convert_to_tensors(lowerCAmelCase__ )
return input_features
| 683 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
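# Round-trip demo added for illustration; the expected output is the classic
# Wikipedia Playfair example for the key "playfair example".
if __name__ == "__main__":
    ciphertext = encode("Hide the gold in the tree stump", "playfair example")
    print(ciphertext)  # BMODZBXDNABEKUDMUIXMMOUVIF
    print(decode(ciphertext, "playfair example"))  # HIDETHEGOLDINTHETREXESTUMP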
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
_lowercase = Lock()
# the function run by each process; `position` is the index in the list the
# process represents and `value` is the list entry it starts with
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
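# Added for illustration: a single-process reference implementation of the same
# compare-exchange schedule, handy for sanity-checking the parallel version,
# e.g. odd_even_transposition_sequential([3, 1, 2]) == [1, 2, 3].
def odd_even_transposition_sequential(arr: list) -> list:
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr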
if __name__ == "__main__":
main()
| 683 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = XGLMTokenizer
UpperCamelCase_ = XGLMTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = True
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ : Dict = XGLMTokenizer(lowerCAmelCase__ ,keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = "<pad>"
lowerCAmelCase_ : List[str] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) ,lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<s>" )
self.assertEqual(vocab_keys[1] ,"<pad>" )
self.assertEqual(len(lowerCAmelCase__ ) ,10_08 )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,10_08 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : str = XGLMTokenizer(lowerCAmelCase__ ,keep_accents=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase__ ,["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,[value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] ,)
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] ,)
lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
lowerCAmelCase_ : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ ,[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] ,)
@cached_property
def UpperCAmelCase_ ( self : int ) -> List[Any]:
'''simple docstring'''
return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ ,f.name )
lowerCAmelCase_ : str = XGLMTokenizer(f.name ,keep_accents=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = pickle.dumps(lowerCAmelCase__ )
pickle.loads(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : int = self.get_tokenizer()
lowerCAmelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
lowerCAmelCase_ : Dict = tokenizer.tokenize(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase_ : Optional[int] = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : str ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = "Hello World!"
lowerCAmelCase_ : Tuple = [2, 3_12_27, 44_47, 35]
self.assertListEqual(lowerCAmelCase__ ,self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
lowerCAmelCase_ : Dict = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
# fmt: on
self.assertListEqual(lowerCAmelCase__ ,self.big_tokenizer.encode(lowerCAmelCase__ ) )
@slow
def UpperCAmelCase_ ( self : Tuple ) -> str:
'''simple docstring'''
        # fmt: off
        lowerCAmelCase_ : Union[str, Any] = {
"input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ ,model_name="facebook/xglm-564M" ,padding=lowerCAmelCase__ ,)
| 683 |
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
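# Worked micro-example (added commentary; shown as comments because the helper
# names in this record are mangled): with the classic Wikipedia HMM
#   observations_space = ["normal", "cold", "dizzy"]
#   states_space = ["Healthy", "Fever"]
#   initial_probabilities = {"Healthy": 0.6, "Fever": 0.4}
#   transition_probabilities = {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
#                               "Fever": {"Healthy": 0.4, "Fever": 0.6}}
#   emission_probabilities = {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#                             "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# the most likely state sequence is ["Healthy", "Healthy", "Fever"].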
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 683 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'microsoft/speecht5_tts'
UpperCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ = 'text_reader'
UpperCamelCase_ = SpeechTaProcessor
UpperCamelCase_ = SpeechTaForTextToSpeech
UpperCamelCase_ = SpeechTaHifiGan
UpperCamelCase_ = ['text']
UpperCamelCase_ = ['audio']
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
if self.post_processor is None:
lowerCAmelCase_ : Any = "microsoft/speecht5_hifigan"
super().setup()
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.pre_processor(text=lowerCAmelCase__ ,return_tensors="pt" ,truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase_ : str = load_dataset("Matthijs/cmu-arctic-xvectors" ,split="validation" )
lowerCAmelCase_ : List[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
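# Hypothetical usage sketch (added commentary; the tool id and the agents API
# surface are assumptions, check the installed transformers version):
#   from transformers import load_tool
#   reader = load_tool("text-to-speech")
#   waveform = reader("Hello world")  # 1-D float tensor, 16 kHz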
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'microsoft/speecht5_tts'
UpperCamelCase_ = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
UpperCamelCase_ = 'text_reader'
UpperCamelCase_ = SpeechTaProcessor
UpperCamelCase_ = SpeechTaForTextToSpeech
UpperCamelCase_ = SpeechTaHifiGan
UpperCamelCase_ = ['text']
UpperCamelCase_ = ['audio']
def UpperCAmelCase_ ( self : Dict ) -> Any:
'''simple docstring'''
if self.post_processor is None:
lowerCAmelCase_ : Any = "microsoft/speecht5_hifigan"
super().setup()
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Any = self.pre_processor(text=lowerCAmelCase__ ,return_tensors="pt" ,truncation=lowerCAmelCase__ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowerCAmelCase_ : str = load_dataset("Matthijs/cmu-arctic-xvectors" ,split="validation" )
lowerCAmelCase_ : List[Any] = torch.tensor(embeddings_dataset[73_05]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : str ) -> Any:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(lowerCAmelCase__ ).cpu().detach()
| 683 | 1 |
def different_signs(num1: int, num2: int) -> bool:
    return num1 ^ num2 < 0
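# Why this works (added commentary): for two's-complement integers the sign is
# carried in the top bit, so num1 ^ num2 is negative exactly when the two sign
# bits differ, e.g. different_signs(1, -1) -> True (1 ^ -1 == -2) while
# different_signs(3, 5) -> False (3 ^ 5 == 6).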
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
_lowercase = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def UpperCamelCase ( snake_case__):
def remove_articles(snake_case__):
return ARTICLES_REGEX.sub(" " , snake_case__)
def white_space_fix(snake_case__):
return " ".join(text.split())
def remove_punc(snake_case__):
lowerCAmelCase_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case__):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
def UpperCamelCase ( snake_case__):
if not s:
return []
return normalize_answer(snake_case__).split()
def UpperCamelCase ( snake_case__ , snake_case__):
return int(normalize_answer(snake_case__) == normalize_answer(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = get_tokens(snake_case__)
lowerCAmelCase_ : Union[str, Any] = get_tokens(snake_case__)
lowerCAmelCase_ : Any = collections.Counter(snake_case__) & collections.Counter(snake_case__)
lowerCAmelCase_ : Dict = sum(common.values())
if len(snake_case__) == 0 or len(snake_case__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase_ : List[Any] = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : int = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
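# Worked example (added commentary): gold "the cat sat" vs. prediction
# "cat sat down". normalize_answer drops the article "the", leaving token bags
# {cat, sat} and {cat, sat, down}; num_same = 2, precision = 2/3, recall = 1.0,
# so F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.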
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
                lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(t)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
                a_pred = preds[qid]
                # Take max over all gold answers
                lowerCAmelCase_ : Any = max(compute_exact(a , a_pred) for a in gold_answers)
                lowerCAmelCase_ : Optional[Any] = max(compute_fa(a , a_pred) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
    lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda k: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
lowerCAmelCase_ : Dict = {k: float(snake_case__) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
    lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda k: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
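# Added commentary: the sweep above visits question ids in order of increasing
# no-answer probability, scoring "answer everything up to this point, abstain on
# the rest", and returns the best aggregate score together with the probability
# threshold that achieves it.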
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = (IPNDMScheduler,)
UpperCamelCase_ = (('num_inference_steps', 5_0),)
def UpperCAmelCase_ ( self : List[str] ,**lowerCAmelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = {"num_train_timesteps": 10_00}
config.update(**lowerCAmelCase__ )
return config
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Tuple=0 ,**lowerCAmelCase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ : str = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Any = kwargs.pop("num_inference_steps" ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.dummy_sample
lowerCAmelCase_ : int = 0.1 * sample
lowerCAmelCase_ : Any = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : int = self.get_scheduler_config(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
lowerCAmelCase_ : Union[str, Any] = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase_ : Tuple = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
lowerCAmelCase_ : Any = dummy_past_residuals[:]
lowerCAmelCase_ : List[Any] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : Optional[Any] = new_scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase_ : Any = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : Union[str, Any] = new_scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Union[str, Any]=0 ,**lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Dict = kwargs.pop("num_inference_steps" ,lowerCAmelCase__ )
lowerCAmelCase_ : str = self.dummy_sample
lowerCAmelCase_ : List[str] = 0.1 * sample
lowerCAmelCase_ : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : List[str] = self.get_scheduler_config()
lowerCAmelCase_ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase_ : List[Any] = dummy_past_residuals[:]
if time_step is None:
lowerCAmelCase_ : str = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
lowerCAmelCase_ : int = scheduler_class.from_pretrained(lowerCAmelCase__ )
            # set timesteps on the freshly loaded scheduler
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase_ : Any = dummy_past_residuals[:]
lowerCAmelCase_ : Optional[int] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : List[str] = new_scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
lowerCAmelCase_ : Union[str, Any] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : Optional[int] = new_scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def UpperCAmelCase_ ( self : int ,**lowerCAmelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = self.scheduler_classes[0]
lowerCAmelCase_ : Optional[int] = self.get_scheduler_config(**lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
lowerCAmelCase_ : str = 10
lowerCAmelCase_ : int = self.dummy_model()
lowerCAmelCase_ : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : Any = model(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase_ : int = model(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ).prev_sample
return sample
def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = dict(self.forward_default_kwargs )
lowerCAmelCase_ : Tuple = kwargs.pop("num_inference_steps" ,lowerCAmelCase__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase_ : Tuple = self.get_scheduler_config()
lowerCAmelCase_ : List[Any] = scheduler_class(**lowerCAmelCase__ )
lowerCAmelCase_ : str = self.dummy_sample
lowerCAmelCase_ : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__ ,"set_timesteps" ):
scheduler.set_timesteps(lowerCAmelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ ,"set_timesteps" ):
lowerCAmelCase_ : Optional[Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase_ : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
lowerCAmelCase_ : List[Any] = dummy_past_residuals[:]
lowerCAmelCase_ : Optional[int] = scheduler.timesteps[5]
lowerCAmelCase_ : List[str] = scheduler.timesteps[6]
lowerCAmelCase_ : List[str] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : Union[str, Any] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
lowerCAmelCase_ : int = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
lowerCAmelCase_ : List[Any] = scheduler.step(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ,time_step=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] ,[10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase__ ,time_step=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = self.full_loop()
lowerCAmelCase_ : Dict = torch.mean(torch.abs(lowerCAmelCase__ ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
| 683 |
from math import sqrt
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[int] = 0
for i in range(1 , int(sqrt(snake_case__) + 1)):
if n % i == 0 and i != sqrt(snake_case__):
total += i + n // i
elif i == sqrt(snake_case__):
total += i
return total - n
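# Worked example for the proper-divisor sum above (n = 28): the loop visits
# i = 1..5 and accumulates (1 + 28) + (2 + 14) + (4 + 7) = 56, then returns
# 56 - 28 = 28. The proper divisors of 28 sum back to 28, i.e. 28 is perfect.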
def UpperCamelCase ( snake_case__ = 1_00_00):
lowerCAmelCase_ : int = sum(
i
for i in range(1 , snake_case__)
if sum_of_divisors(sum_of_divisors(snake_case__)) == i and sum_of_divisors(snake_case__) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 683 | 1 |
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
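# Quick check of the digit-sum helper above: for num = 12345 the loop peels
# off 5, 4, 3, 2, 1 from the right, so it returns 5 + 4 + 3 + 2 + 1 = 15.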
def UpperCamelCase ( snake_case__ = 1_00):
lowerCAmelCase_ : str = 1
lowerCAmelCase_ : Tuple = 2
for i in range(2 , max_n + 1):
lowerCAmelCase_ : Union[str, Any] = pre_numerator
lowerCAmelCase_ : int = 2 * i // 3 if i % 3 == 0 else 1
lowerCAmelCase_ : Union[str, Any] = cur_numerator
lowerCAmelCase_ : List[Any] = e_cont * pre_numerator + temp
return sum_digits(snake_case__)
if __name__ == "__main__":
print(f"{solution() = }")
| 683 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
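# How the lazy pattern above behaves: at import time only `_import_structure`
# (plain strings) is built, and `_LazyModule` performs the real submodule
# import the first time an attribute such as `Speech2TextModel` is accessed,
# so the heavy torch / tensorflow / sentencepiece backends load only on use.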
| 683 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
_lowercase = None
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
_lowercase = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
_lowercase = '''▁'''
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = AlbertTokenizer
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : str=None ,lowerCAmelCase__ : Union[str, Any]=True ,lowerCAmelCase__ : Dict=True ,lowerCAmelCase__ : List[Any]=False ,lowerCAmelCase__ : Optional[int]="[CLS]" ,lowerCAmelCase__ : List[Any]="[SEP]" ,lowerCAmelCase__ : Dict="<unk>" ,lowerCAmelCase__ : int="[SEP]" ,lowerCAmelCase__ : Optional[Any]="<pad>" ,lowerCAmelCase__ : List[str]="[CLS]" ,lowerCAmelCase__ : Optional[Any]="[MASK]" ,**lowerCAmelCase__ : Optional[int] ,) -> int:
'''simple docstring'''
lowerCAmelCase_ : int = (
AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ,normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,remove_space=lowerCAmelCase__ ,keep_accents=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Dict = do_lower_case
lowerCAmelCase_ : List[str] = remove_space
lowerCAmelCase_ : Any = keep_accents
lowerCAmelCase_ : Optional[Any] = vocab_file
lowerCAmelCase_ : Optional[int] = False if not self.vocab_file else True
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
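    # Shape sketch for the mask above (illustrative one-token sequences):
    # a single sequence [CLS] A [SEP] yields [0, 0, 0], while a pair
    # [CLS] A [SEP] B [SEP] yields [0, 0, 0, 1, 1].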
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : List[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file ,lowerCAmelCase__ )
return (out_vocab_file,)
| 683 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
lowerCAmelCase_ : str = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
lowerCAmelCase_ : Tuple = bs[:]
lowerCAmelCase_ : Dict = 0
for b in range(2**8):
if b not in bs:
bs.append(snake_case__)
cs.append(2**8 + n)
n += 1
lowerCAmelCase_ : Union[str, Any] = [chr(snake_case__) for n in cs]
return dict(zip(snake_case__ , snake_case__))
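# Illustration of the byte-to-unicode table built above: printable bytes map
# to themselves (b"A" -> "A"), while the remaining bytes are shifted past
# 0x100 in order, e.g. the space byte 0x20 becomes "Ġ" and newline 0x0A
# becomes "Ċ", so every byte stays representable as a visible character.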
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = set()
lowerCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ : Union[str, Any] = char
return pairs
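# Example for get_pairs above: the symbol tuple ("h", "e", "l", "l", "o")
# yields the set {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.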
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
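    # Worked example of the merge loop above, under a hypothetical rank table
    # {("h","e"): 0, ("l","l"): 1, ("he","ll"): 2, ("hell","o"): 3}:
    # ("h","e","l","l","o") -> ("he","l","l","o") -> ("he","ll","o")
    # -> ("hell","o") -> ("hello",); with one symbol left the loop stops and
    # the method caches and returns "hello".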
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
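    # Layout produced above (RoBERTa-style): a single sequence becomes
    # <s> A </s>, and a pair becomes <s> A </s></s> B </s>, with two
    # consecutive separator tokens between the sequences.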
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
| 683 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ShapEPipeline
UpperCamelCase_ = ['prompt']
UpperCamelCase_ = ['prompt']
UpperCamelCase_ = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCamelCase_ = False
@property
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
return 8
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCAmelCase_ ( self : int ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=self.text_embedder_hidden_size ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
return CLIPTextModelWithProjection(lowerCAmelCase__ )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
lowerCAmelCase_ : Optional[int] = PriorTransformer(**lowerCAmelCase__ )
return model
@property
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
lowerCAmelCase_ : Any = ShapERenderer(**lowerCAmelCase__ )
return model
def UpperCAmelCase_ ( self : Dict ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : int = self.dummy_prior
lowerCAmelCase_ : Optional[int] = self.dummy_text_encoder
lowerCAmelCase_ : Tuple = self.dummy_tokenizer
lowerCAmelCase_ : Any = self.dummy_renderer
lowerCAmelCase_ : List[Any] = HeunDiscreteScheduler(
beta_schedule="exp" ,num_train_timesteps=10_24 ,prediction_type="sample" ,use_karras_sigmas=lowerCAmelCase__ ,clip_sample=lowerCAmelCase__ ,clip_sample_range=1.0 ,)
lowerCAmelCase_ : Union[str, Any] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Any ,lowerCAmelCase__ : str=0 ) -> Union[str, Any]:
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("mps" ):
lowerCAmelCase_ : int = torch.manual_seed(lowerCAmelCase__ )
else:
lowerCAmelCase_ : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : str = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = "cpu"
lowerCAmelCase_ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase_ : str = self.pipeline_class(**lowerCAmelCase__ )
lowerCAmelCase_ : str = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : str = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) )
lowerCAmelCase_ : Tuple = output.images[0]
lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
lowerCAmelCase_ : List[str] = np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : Any ) -> int:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : str = torch_device == "cpu"
lowerCAmelCase_ : List[str] = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=lowerCAmelCase__ ,relax_max_difference=lowerCAmelCase__ ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase_ : Dict = self.pipeline_class(**lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Tuple = 2
lowerCAmelCase_ : str = self.get_dummy_inputs(lowerCAmelCase__ )
for key in inputs.keys():
if key in self.batch_params:
lowerCAmelCase_ : List[str] = batch_size * [inputs[key]]
lowerCAmelCase_ : List[str] = pipe(**lowerCAmelCase__ ,num_images_per_prompt=lowerCAmelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[Any] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
lowerCAmelCase_ : Optional[int] = ShapEPipeline.from_pretrained("openai/shap-e" )
lowerCAmelCase_ : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
lowerCAmelCase_ : List[str] = pipe(
"a shark" ,generator=lowerCAmelCase__ ,guidance_scale=15.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="np" ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase__ ,lowerCAmelCase__ )
| 683 |
from collections.abc import Iterable
from typing import Any
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : int | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
        lowerCAmelCase_ : Node | None = None  # Added to make deleting a node easier
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Union[str, Any] ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} ,indent=1 )
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Node | None = None ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = root
def __str__( self : Dict ) -> str:
'''simple docstring'''
return str(self.root )
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Node ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if new_children is not None: # reset its kids
lowerCAmelCase_ : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowerCAmelCase__ ): # If it is the right children
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : List[Any] = new_children
else:
lowerCAmelCase_ : Any = new_children
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : Node ) -> bool:
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.root is None
def UpperCAmelCase_ ( self : List[Any] ,lowerCAmelCase__ : Union[str, Any] ) -> None:
'''simple docstring'''
lowerCAmelCase_ : str = Node(lowerCAmelCase__ ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ : Optional[int] = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ : List[Any] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ : Dict = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ : List[str] = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ : Dict = new_node
break
else:
lowerCAmelCase_ : str = parent_node.right
lowerCAmelCase_ : Optional[int] = parent_node
def UpperCAmelCase_ ( self : int ,*lowerCAmelCase__ : Tuple ) -> None:
'''simple docstring'''
for value in values:
self.__insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Optional[int] ) -> Node | None:
'''simple docstring'''
if self.empty():
            raise IndexError("Warning: Tree is empty! Please insert values first." )
else:
lowerCAmelCase_ : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowerCAmelCase_ : Union[str, Any] = node.left if value < node.value else node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
if self.root is None:
return None
lowerCAmelCase_ : Dict = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ : Union[str, Any] = node.right
return node
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : Node | None = None ) -> Node | None:
'''simple docstring'''
if node is None:
lowerCAmelCase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowerCAmelCase_ : Dict = self.root
while node.left is not None:
lowerCAmelCase_ : Union[str, Any] = node.left
return node
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : int ) -> None:
'''simple docstring'''
lowerCAmelCase_ : Dict = self.search(lowerCAmelCase__ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowerCAmelCase__ ,lowerCAmelCase__ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowerCAmelCase__ ,node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowerCAmelCase__ ,node.left )
else:
lowerCAmelCase_ : int = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Node | None ) -> Iterable:
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Dict=None ) -> Any:
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : list ,lowerCAmelCase__ : Node | None ) -> None:
'''simple docstring'''
if node:
self.inorder(lowerCAmelCase__ ,node.left )
arr.append(node.value )
self.inorder(lowerCAmelCase__ ,node.right )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Node ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = []
self.inorder(lowerCAmelCase__ ,lowerCAmelCase__ ) # append all values to list using inorder traversal
return arr[k - 1]
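    # Why the indexing above works: `inorder` visits left subtree, node, then
    # right subtree, so `arr` comes out sorted ascending and `arr[k - 1]` is
    # the k-th smallest value. For the demo tree built from
    # (8, 3, 6, 1, 10, 14, 13, 4, 7), k = 3 returns 4.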
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = []
if curr_node is not None:
lowerCAmelCase_ : Dict = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
return node_list
def UpperCamelCase ( ):
lowerCAmelCase_ : Tuple = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ : Tuple = BinarySearchTree()
for i in testlist:
t.insert(snake_case__)
# Prints all the elements of the list in order traversal
print(snake_case__)
if t.search(6) is not None:
print("The value 6 exists")
else:
print("The value 6 doesn't exist")
if t.search(-1) is not None:
print("The value -1 exists")
else:
print("The value -1 doesn't exist")
if not t.empty():
print("Max Value: " , t.get_max().value) # type: ignore
print("Min Value: " , t.get_min().value) # type: ignore
for i in testlist:
t.remove(snake_case__)
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 683 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 |
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : str = "" ,lowerCAmelCase__ : bool = False ) -> None:
'''simple docstring'''
lowerCAmelCase_ : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
lowerCAmelCase_ : int = is_leaf
lowerCAmelCase_ : Optional[Any] = prefix
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ) -> tuple[str, str, str]:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for q, w in zip(self.prefix ,lowerCAmelCase__ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
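    # Example for the match above: a node with prefix "band" matched against
    # the word "banana" shares "ban", so the method returns
    # ("ban", "d", "ana"), i.e. (common part, leftover prefix, leftover word).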
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : list[str] ) -> None:
'''simple docstring'''
for word in words:
self.insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
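        # Case 1: The word is exactly this node's prefix
        # Solution: We simply mark the current node as a leaf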
if self.prefix == word:
lowerCAmelCase_ : Optional[Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
lowerCAmelCase_ : List[Any] = RadixNode(prefix=lowerCAmelCase__ ,is_leaf=lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = self.nodes[word[0]]
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = incoming_node.match(
lowerCAmelCase__ )
# Case 3: The node prefix is equal to the matching
            # Solution: We insert the remaining word on the child node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
            # Case 4: The node prefix only partially matches the word
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
lowerCAmelCase_ : Optional[int] = remaining_prefix
lowerCAmelCase_ : Optional[int] = self.nodes[matching_string[0]]
lowerCAmelCase_ : List[Any] = RadixNode(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = aux_node
if remaining_word == "":
lowerCAmelCase_ : List[str] = True
else:
self.nodes[matching_string[0]].insert(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = self.nodes.get(word[0] ,lowerCAmelCase__ )
if not incoming_node:
return False
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = incoming_node.match(
lowerCAmelCase__ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(lowerCAmelCase__ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
lowerCAmelCase_ : str = list(self.nodes.values() )[0]
lowerCAmelCase_ : Tuple = merging_node.is_leaf
self.prefix += merging_node.prefix
lowerCAmelCase_ : Optional[int] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
lowerCAmelCase_ : Optional[Any] = False
# If there is 1 edge, we merge it with its child
else:
lowerCAmelCase_ : Tuple = list(incoming_node.nodes.values() )[0]
lowerCAmelCase_ : Union[str, Any] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
lowerCAmelCase_ : str = merging_node.nodes
return True
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : int = 0 ) -> None:
'''simple docstring'''
if self.prefix != "":
print("-" * height ,self.prefix ," (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def UpperCamelCase ( ):
lowerCAmelCase_ : Dict = "banana bananas bandana band apple all beast".split()
lowerCAmelCase_ : List[Any] = RadixNode()
root.insert_many(snake_case__)
assert all(root.find(snake_case__) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def UpperCamelCase ( ):
assert test_trie()
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = RadixNode()
lowerCAmelCase_ : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(snake_case__)
print("Words:" , snake_case__)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 683 | 1 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Any = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Any = 20
lowerCAmelCase_ : Optional[Any] = self._get_uniform_logits(batch_size=2 ,length=lowerCAmelCase__ )
# tweak scores to not be uniform anymore
lowerCAmelCase_ : List[str] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCAmelCase_ : Union[str, Any] = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCAmelCase_ : Optional[Any] = jax.nn.softmax(lowerCAmelCase__ ,axis=-1 )
lowerCAmelCase_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : str = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCAmelCase_ : int = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ ,scores.copy() ,cur_len=lowerCAmelCase__ ) ,axis=-1 )
lowerCAmelCase_ : Optional[Any] = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ ,scores.copy() ,cur_len=lowerCAmelCase__ ) ,axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_sharp[0, :] ,atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] ,warped_prob_smooth[0, :] ,atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() ,warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() ,warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() ,warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() ,warped_prob_smooth[1, :].min() )
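    # The warper exercised above rescales logits as z / T before the softmax,
    # i.e. p_i = exp(z_i / T) / sum_j exp(z_j / T); T = 0.5 sharpens the
    # distribution (peaks rise, valleys fall) and T = 1.3 flattens it, which
    # is exactly what the four comparisons check.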
def UpperCAmelCase_ ( self : List[str] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Dict = 10
lowerCAmelCase_ : Tuple = 2
# create ramp distribution
lowerCAmelCase_ : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, vocab_size) ).copy()
lowerCAmelCase_ : int = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCAmelCase_ : str = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : Any = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() ,7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() ,2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCAmelCase_ : List[Any] = 5
lowerCAmelCase_ : Dict = FlaxTopKLogitsWarper(top_k=1 ,filter_value=0.0 ,min_tokens_to_keep=3 )
lowerCAmelCase_ : List[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, length) ).copy()
lowerCAmelCase_ : List[Any] = top_k_warp_safety_check(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() ,[2, 2] )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : List[str] = 10
lowerCAmelCase_ : Optional[int] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCAmelCase_ : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCAmelCase_ : Any = FlaxTopPLogitsWarper(0.8 )
lowerCAmelCase_ : Union[str, Any] = np.exp(top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCAmelCase_ : Optional[Any] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCAmelCase_ : Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__ )[None, :] ,(batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCAmelCase_ : Optional[Any] = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCAmelCase_ : Tuple = FlaxTopPLogitsWarper(0.9 ,min_tokens_to_keep=2 ,filter_value=0.0 )
lowerCAmelCase_ : Any = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() ,[3, 2] )
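    # Concrete reading of the first top-p case above: for the row
    # [0.3, 0.1, 0.1, 0.5] with top_p = 0.8, the descending order is
    # 0.5, 0.3, 0.1, 0.1 and the cumulative sum reaches 0.8 after two values,
    # so only the 0.5 and 0.3 entries survive, matching the expected array.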
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 20
lowerCAmelCase_ : List[str] = 4
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
# check that min length is applied at length 5
lowerCAmelCase_ : Any = ids_tensor((batch_size, 20) ,vocab_size=20 )
lowerCAmelCase_ : Dict = 5
lowerCAmelCase_ : Any = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = min_dist_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() ,4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCAmelCase_ : Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = 15
lowerCAmelCase_ : Optional[int] = min_dist_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : str = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCAmelCase_ : Any = ids_tensor((batch_size, 1) ,vocab_size=20 )
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : int = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() ,4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCAmelCase_ : List[str] = 3
lowerCAmelCase_ : Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = 20
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Any = 5
lowerCAmelCase_ : Any = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCAmelCase_ : Optional[Any] = ids_tensor((batch_size, 4) ,vocab_size=20 )
lowerCAmelCase_ : Union[str, Any] = 4
lowerCAmelCase_ : Any = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() ,4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCAmelCase_ : Union[str, Any] = 3
lowerCAmelCase_ : Optional[int] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = logits_processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
self.assertFalse(jnp.isinf(lowerCAmelCase__ ).any() )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = 4
lowerCAmelCase_ : Union[str, Any] = 10
lowerCAmelCase_ : List[Any] = 15
lowerCAmelCase_ : Tuple = 2
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Dict = 15
# dummy input_ids and scores
lowerCAmelCase_ : List[Any] = ids_tensor((batch_size, sequence_length) ,lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = input_ids.copy()
lowerCAmelCase_ : str = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : int = scores.copy()
# instantiate all dist processors
lowerCAmelCase_ : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase_ : Any = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = 10
# no processor list
lowerCAmelCase_ : int = temp_dist_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = min_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = bos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = eos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# with processor list
lowerCAmelCase_ : Dict = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase_ : List[str] = processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Dict = 10
lowerCAmelCase_ : int = 15
lowerCAmelCase_ : Union[str, Any] = 2
lowerCAmelCase_ : Dict = 1
lowerCAmelCase_ : List[Any] = 15
# dummy input_ids and scores
lowerCAmelCase_ : str = ids_tensor((batch_size, sequence_length) ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = input_ids.copy()
lowerCAmelCase_ : Optional[int] = self._get_uniform_logits(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = scores.copy()
# instantiate all dist processors
lowerCAmelCase_ : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCAmelCase_ : List[Any] = FlaxTopKLogitsWarper(3 )
lowerCAmelCase_ : List[Any] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCAmelCase_ : str = FlaxMinLengthLogitsProcessor(min_length=10 ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ ,eos_token_id=lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = temp_dist_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = top_k_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Any = top_p_warp(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = min_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = eos_dist_proc(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : str ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Union[str, Any] ):
lowerCAmelCase_ : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCAmelCase_ : Any = processor(lowerCAmelCase__ ,lowerCAmelCase__ ,cur_len=lowerCAmelCase__ )
return scores
lowerCAmelCase_ : Any = jax.jit(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = jax.jit(lowerCAmelCase__ )
lowerCAmelCase_ : str = jitted_run_no_processor_list(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = jitted_run_processor_list(lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ ,lowerCAmelCase__ ,atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() ,input_ids_comp.tolist() )
| 683 |
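The tests above compose several Flax logits processors into a single `FlaxLogitsProcessorList` and jit the result. As a minimal sketch of that same composition outside a test harness — assuming the public `transformers` Flax generation API with the `(input_ids, scores, cur_len)` call signature exercised above:

import jax
import jax.numpy as jnp
from transformers import FlaxLogitsProcessorList, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper

# Chain a temperature warp and a top-k filter into one callable, as the
# tests above do, and jit the whole pipeline.
processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=5)]
)

@jax.jit
def warp(input_ids, scores, cur_len):
    # Each processor maps (input_ids, scores, cur_len) -> scores, so the
    # list applies them left to right and can be traced as one function.
    return processors(input_ids, scores, cur_len=cur_len)

dummy_ids = jnp.ones((2, 4), dtype=jnp.int32)
dummy_scores = jnp.zeros((2, 20))
print(warp(dummy_ids, dummy_scores, 4).shape)  # (2, 20)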
from __future__ import annotations
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or fewer than 2 values")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor")
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor")
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor")
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
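The style-context function above implements the semiconductor mass-action law n · p = n_i², though the dump's mechanically renamed variables obscure the control flow. A clean, runnable sketch of the same logic, where exactly one of the three quantities is passed as 0 to mark it as the unknown:

def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
    """Solve n * p = n_i**2 for whichever of the three values is given as 0."""
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("Exactly one value must be 0 (the unknown to solve for)")
    if min(electron_conc, hole_conc, intrinsic_conc) < 0:
        raise ValueError("Concentrations cannot be negative")
    if electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    if hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)

print(carrier_concentration(25, 100, 0))  # ('intrinsic_conc', 50.0)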
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
elif isinstance(snake_case__ , snake_case__):
inputs.append(create_inputs(snake_case__))
else:
raise ValueError(F'''Invalid type requested: {input_type}''')
return inputs
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[Any] = []
for output in outputs:
if isinstance(snake_case__ , (str, AgentText)):
output_types.append("text")
elif isinstance(snake_case__ , (Image.Image, AgentImage)):
output_types.append("image")
elif isinstance(snake_case__ , (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(F'''Invalid output: {output}''')
return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''],
'''processing_git''': ['''GitProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GitForCausalLM''',
'''GitModel''',
'''GitPreTrainedModel''',
'''GitVisionModel''',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 1 |
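The `__init__` above routes the torch-dependent imports through transformers' `_LazyModule`, so importing the package stays cheap until an attribute is actually touched. A stdlib-only sketch of that pattern — the package layout here is hypothetical, and the real `_LazyModule` carries more machinery than this:

import importlib
from types import ModuleType

class LazyModule(ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

# Nothing heavy is imported until the attribute is used:
lazy = LazyModule("demo", {"json": ["loads"]})
print(lazy.loads('{"ok": true}'))  # {'ok': True}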
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'AutoTokenizer'
UpperCamelCase_ = ['tokenizer']
UpperCamelCase_ = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[Any]=None ) -> Tuple:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = speaker_embeddings
@classmethod
def UpperCAmelCase_ ( cls : Any ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : Dict="speaker_embeddings_path.json" ,**lowerCAmelCase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
lowerCAmelCase_ : Optional[Any] = get_file_from_repo(
lowerCAmelCase__ ,lowerCAmelCase__ ,subfolder=kwargs.pop("subfolder" ,lowerCAmelCase__ ) ,cache_dir=kwargs.pop("cache_dir" ,lowerCAmelCase__ ) ,force_download=kwargs.pop("force_download" ,lowerCAmelCase__ ) ,proxies=kwargs.pop("proxies" ,lowerCAmelCase__ ) ,resume_download=kwargs.pop("resume_download" ,lowerCAmelCase__ ) ,local_files_only=kwargs.pop("local_files_only" ,lowerCAmelCase__ ) ,use_auth_token=kwargs.pop("use_auth_token" ,lowerCAmelCase__ ) ,revision=kwargs.pop("revision" ,lowerCAmelCase__ ) ,)
if speaker_embeddings_path is None:
logger.warning(
                f'''`{os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ )}` does not exist
                , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
lowerCAmelCase_ : Dict = None
else:
with open(lowerCAmelCase__ ) as speaker_embeddings_json:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
return cls(tokenizer=lowerCAmelCase__ ,speaker_embeddings=lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Any="speaker_embeddings_path.json" ,lowerCAmelCase__ : Any="speaker_embeddings" ,lowerCAmelCase__ : bool = False ,**lowerCAmelCase__ : Union[str, Any] ,) -> List[Any]:
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ,"v2" ) ,exist_ok=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : Optional[int] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowerCAmelCase_ : Dict = self._load_voice_preset(lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] ,lowerCAmelCase__ ,f'''{prompt_key}_{key}''' ) ,voice_preset[key] ,allow_pickle=lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase__ ,f'''{prompt_key}_{key}.npy''' )
lowerCAmelCase_ : List[str] = tmp_dict
with open(os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) ,"w" ) as fp:
json.dump(lowerCAmelCase__ ,lowerCAmelCase__ )
super().save_pretrained(lowerCAmelCase__ ,lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : str = None ,**lowerCAmelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : int = self.speaker_embeddings[voice_preset]
lowerCAmelCase_ : Optional[Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
lowerCAmelCase_ : List[str] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] ,subfolder=kwargs.pop("subfolder" ,lowerCAmelCase__ ) ,cache_dir=kwargs.pop("cache_dir" ,lowerCAmelCase__ ) ,force_download=kwargs.pop("force_download" ,lowerCAmelCase__ ) ,proxies=kwargs.pop("proxies" ,lowerCAmelCase__ ) ,resume_download=kwargs.pop("resume_download" ,lowerCAmelCase__ ) ,local_files_only=kwargs.pop("local_files_only" ,lowerCAmelCase__ ) ,use_auth_token=kwargs.pop("use_auth_token" ,lowerCAmelCase__ ) ,revision=kwargs.pop("revision" ,lowerCAmelCase__ ) ,)
if path is None:
raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
lowerCAmelCase_ : str = np.load(lowerCAmelCase__ )
return voice_preset_dict
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Optional[dict] = None ) -> Tuple:
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : Dict ,lowerCAmelCase__ : int=None ,lowerCAmelCase__ : Union[str, Any]=None ,lowerCAmelCase__ : Union[str, Any]="pt" ,lowerCAmelCase__ : int=2_56 ,lowerCAmelCase__ : str=False ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Optional[Any]=False ,**lowerCAmelCase__ : List[Any] ,) -> Optional[int]:
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
if (
isinstance(lowerCAmelCase__ ,lowerCAmelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowerCAmelCase_ : List[str] = self._load_voice_preset(lowerCAmelCase__ )
else:
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) and not voice_preset.endswith(".npz" ):
lowerCAmelCase_ : List[Any] = voice_preset + ".npz"
lowerCAmelCase_ : Dict = np.load(lowerCAmelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : str = BatchFeature(data=lowerCAmelCase__ ,tensor_type=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.tokenizer(
lowerCAmelCase__ ,return_tensors=lowerCAmelCase__ ,padding="max_length" ,max_length=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,return_token_type_ids=lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
if voice_preset is not None:
lowerCAmelCase_ : Dict = voice_preset
return encoded_text
| 683 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def UpperCamelCase ( ):
lowerCAmelCase_ : List[str] = HfArgumentParser(snake_case__)
lowerCAmelCase_ : List[Any] = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ : Optional[int] = TensorFlowBenchmark(args=snake_case__)
try:
lowerCAmelCase_ : Tuple = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase_ : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead."
lowerCAmelCase_ : Tuple = " ".join(str(snake_case__).split(" ")[:-1])
lowerCAmelCase_ : Union[str, Any] = ""
lowerCAmelCase_ : Optional[Any] = eval(str(snake_case__).split(" ")[-1])
lowerCAmelCase_ : Tuple = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:])
else:
wrong_args.append(snake_case__)
if len(snake_case__) > 0:
lowerCAmelCase_ : Optional[Any] = full_error_msg + begin_error_msg + str(snake_case__)
raise ValueError(snake_case__)
benchmark.run()
if __name__ == "__main__":
main()
| 683 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_lowercase = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_lowercase = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_lowercase = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" ,id="token" ) ,id="sequence" ) ,id="references" ),
} ) ,codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] ,reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] ,)
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any] ,lowerCAmelCase__ : List[str]=4 ,lowerCAmelCase__ : List[str]=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = compute_bleu(
reference_corpus=lowerCAmelCase__ ,translation_corpus=lowerCAmelCase__ ,max_order=lowerCAmelCase__ ,smooth=lowerCAmelCase__ )
((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) : Union[str, Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 683 |
_lowercase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def UpperCamelCase ( snake_case__):
assert type(snake_case__) in (int, float) and decimal == int(snake_case__)
lowerCAmelCase_ : Optional[Any] = int(snake_case__)
lowerCAmelCase_ : Tuple = ""
lowerCAmelCase_ : str = False
if decimal < 0:
lowerCAmelCase_ : Tuple = True
decimal *= -1
while decimal > 0:
lowerCAmelCase_ , lowerCAmelCase_ : Any = divmod(snake_case__ , 16)
lowerCAmelCase_ : Dict = values[remainder] + hexadecimal
lowerCAmelCase_ : List[str] = "0x" + hexadecimal
if negative:
lowerCAmelCase_ : Optional[Any] = "-" + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 683 | 1 |
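The style-context snippet above converts a decimal integer to hexadecimal by repeated division by 16; since the dump's renaming detaches some intermediate names, here is a self-contained sketch of the same algorithm (with an explicit zero case added):

HEX_DIGITS = "0123456789abcdef"

def decimal_to_hexadecimal(decimal: int) -> str:
    """Build a '0x'-prefixed hex string via repeated divmod by 16."""
    negative = decimal < 0
    decimal = abs(decimal)
    digits = ""
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        digits = HEX_DIGITS[remainder] + digits
    result = "0x" + (digits or "0")
    return "-" + result if negative else result

assert decimal_to_hexadecimal(255) == "0xff" == hex(255)
assert decimal_to_hexadecimal(-42) == "-0x2a" == hex(-42)
print(decimal_to_hexadecimal(2023))  # 0x7e7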
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowercase = {'''UserAgent''': UserAgent().random}
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = script.contents[0]
lowerCAmelCase_ : List[str] = json.loads(data[data.find("{\"config\"") : -1])
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class __snake_case :
"""simple docstring"""
def __init__( self : Dict ,lowerCAmelCase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : List[str] = f'''https://www.instagram.com/{username}/'''
lowerCAmelCase_ : Dict = self.get_json()
def UpperCAmelCase_ ( self : Optional[Any] ) -> dict:
'''simple docstring'''
lowerCAmelCase_ : Any = requests.get(self.url ,headers=lowerCAmelCase__ ).text
lowerCAmelCase_ : Tuple = BeautifulSoup(lowerCAmelCase__ ,"html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : Any ) -> str:
'''simple docstring'''
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self : List[str] ) -> str:
'''simple docstring'''
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def UpperCAmelCase_ ( self : List[str] ) -> str:
'''simple docstring'''
return self.user_data["username"]
@property
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
return self.user_data["full_name"]
@property
def UpperCAmelCase_ ( self : str ) -> str:
'''simple docstring'''
return self.user_data["biography"]
@property
def UpperCAmelCase_ ( self : List[Any] ) -> str:
'''simple docstring'''
return self.user_data["business_email"]
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.user_data["external_url"]
@property
def UpperCAmelCase_ ( self : Dict ) -> int:
'''simple docstring'''
return self.user_data["edge_followed_by"]["count"]
@property
def UpperCAmelCase_ ( self : Tuple ) -> int:
'''simple docstring'''
return self.user_data["edge_follow"]["count"]
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
'''simple docstring'''
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return self.user_data["profile_pic_url_hd"]
@property
def UpperCAmelCase_ ( self : List[str] ) -> bool:
'''simple docstring'''
return self.user_data["is_verified"]
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> bool:
'''simple docstring'''
return self.user_data["is_private"]
def UpperCamelCase ( snake_case__ = "github"):
import os
if os.environ.get("CI"):
return # test failing on GitHub Actions
lowerCAmelCase_ : Union[str, Any] = InstagramUser(snake_case__)
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , snake_case__)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram.")
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = InstagramUser('''github''')
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.number_of_followings = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }")
| 683 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_lowercase = ['''text''', '''image''', '''audio''']
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : int = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input")
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((5_12, 5_12)))
elif input_type == "audio":
inputs.append(torch.ones(30_00))
elif isinstance(snake_case__ , snake_case__):
inputs.append(create_inputs(snake_case__))
else:
raise ValueError(F'''Invalid type requested: {input_type}''')
return inputs
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : List[Any] = []
for output in outputs:
if isinstance(snake_case__ , (str, AgentText)):
output_types.append("text")
elif isinstance(snake_case__ , (Image.Image, AgentImage)):
output_types.append("image")
elif isinstance(snake_case__ , (torch.Tensor, AgentAudio)):
output_types.append("audio")
else:
raise ValueError(F'''Invalid output: {output}''')
return output_types
@is_tool_test
class __snake_case :
"""simple docstring"""
def UpperCAmelCase_ ( self : int ) -> int:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"inputs" ) )
self.assertTrue(hasattr(self.tool ,"outputs" ) )
lowerCAmelCase_ : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input ,lowerCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase_ : Any = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Any = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase_ : Optional[int] = [outputs]
self.assertListEqual(output_types(lowerCAmelCase__ ) ,self.tool.outputs )
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
self.assertTrue(hasattr(self.tool ,"description" ) )
self.assertTrue(hasattr(self.tool ,"default_checkpoint" ) )
self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : str = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
for output, output_type in zip(lowerCAmelCase__ ,self.tool.outputs ):
lowerCAmelCase_ : Tuple = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Tuple = create_inputs(self.tool.inputs )
lowerCAmelCase_ : List[Any] = []
for _input, input_type in zip(lowerCAmelCase__ ,self.tool.inputs ):
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase_ : List[Any] = self.tool(*lowerCAmelCase__ )
if not isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
lowerCAmelCase_ : int = [outputs]
self.assertEqual(len(lowerCAmelCase__ ) ,len(self.tool.outputs ) )
| 683 | 1 |
_lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 683 |
import pytest
_lowercase = '''__dummy_dataset1__'''
_lowercase = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def UpperCamelCase ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = dataset_loading_script_name
lowerCAmelCase_ : List[str] = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=snake_case__)
lowerCAmelCase_ : List[Any] = script_dir / F'''{script_name}.py'''
with open(snake_case__ , "w") as f:
f.write(snake_case__)
return str(snake_case__)
| 683 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
_lowercase = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
_lowercase = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
UpperCamelCase_ = BartTokenizer
def __init__( self : str ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=None ,lowerCAmelCase__ : Optional[Any]=None ,lowerCAmelCase__ : Dict="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : Tuple="</s>" ,lowerCAmelCase__ : int="</s>" ,lowerCAmelCase__ : int="<s>" ,lowerCAmelCase__ : Optional[int]="<unk>" ,lowerCAmelCase__ : List[str]="<pad>" ,lowerCAmelCase__ : Dict="<mask>" ,lowerCAmelCase__ : Tuple=False ,lowerCAmelCase__ : Tuple=True ,**lowerCAmelCase__ : str ,) -> int:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,trim_offsets=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" ,lowerCAmelCase__ ) != add_prefix_space:
lowerCAmelCase_ : List[Any] = getattr(lowerCAmelCase__ ,pre_tok_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = add_prefix_space
lowerCAmelCase_ : List[str] = pre_tok_class(**lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCAmelCase_ : int = "post_processor"
lowerCAmelCase_ : str = getattr(self.backend_tokenizer ,lowerCAmelCase__ ,lowerCAmelCase__ )
if tokenizer_component_instance:
lowerCAmelCase_ : str = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCAmelCase_ : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
lowerCAmelCase_ : Optional[Any] = tuple(state["cls"] )
lowerCAmelCase_ : Optional[int] = False
if state.get("add_prefix_space" ,lowerCAmelCase__ ) != add_prefix_space:
lowerCAmelCase_ : Union[str, Any] = add_prefix_space
lowerCAmelCase_ : str = True
if state.get("trim_offsets" ,lowerCAmelCase__ ) != trim_offsets:
lowerCAmelCase_ : Optional[Any] = trim_offsets
lowerCAmelCase_ : str = True
if changes_to_apply:
lowerCAmelCase_ : List[str] = getattr(lowerCAmelCase__ ,state.pop("type" ) )
lowerCAmelCase_ : List[Any] = component_class(**lowerCAmelCase__ )
setattr(self.backend_tokenizer ,lowerCAmelCase__ ,lowerCAmelCase__ )
@property
def UpperCAmelCase_ ( self : Dict ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else value
lowerCAmelCase_ : Optional[Any] = value
def UpperCAmelCase_ ( self : str ,*lowerCAmelCase__ : Union[str, Any] ,**lowerCAmelCase__ : List[Any] ) -> BatchEncoding:
'''simple docstring'''
lowerCAmelCase_ : Dict = kwargs.get("is_split_into_words" ,lowerCAmelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Dict ,*lowerCAmelCase__ : List[Any] ,**lowerCAmelCase__ : Dict ) -> BatchEncoding:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = kwargs.get("is_split_into_words" ,lowerCAmelCase__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs." )
return super()._encode_plus(*lowerCAmelCase__ ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[str]=None ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 683 |
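The fast tokenizer above wires BART's byte-level BPE together with its `<s> A </s></s> B </s>` pair template. A quick usage sketch, assuming the `facebook/bart-base` checkpoint can be downloaded:

# Usage sketch for the tokenizer class defined above (needs network access
# to fetch the facebook/bart-base files on first run).
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tok("Hello world", "and a second segment")
print(enc.input_ids)            # note the doubled </s> between the segments
print(tok.decode(enc.input_ids))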
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __snake_case ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = CodeGenTokenizer
UpperCamelCase_ = CodeGenTokenizerFast
UpperCamelCase_ = True
UpperCamelCase_ = {'add_prefix_space': True}
UpperCamelCase_ = False
def UpperCAmelCase_ ( self : str ) -> Tuple:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowerCAmelCase_ : int = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ : List[Any] = {"unk_token": "<unk>"}
lowerCAmelCase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : str ) -> int:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,**lowerCAmelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : str ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = "lower newer"
lowerCAmelCase_ : Tuple = "lower newer"
return input_text, output_text
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
lowerCAmelCase_ : Dict = "lower newer"
lowerCAmelCase_ : Dict = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCAmelCase_ : Union[str, Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase_ : Tuple = self.get_tokenizer()
lowerCAmelCase_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = "lower newer"
# Testing tokenization
lowerCAmelCase_ : Tuple = tokenizer.tokenize(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids without special tokens
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Any = rust_tokenizer.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing conversion to ids with special tokens
lowerCAmelCase_ : int = self.get_rust_tokenizer(add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : str = tokenizer.encode(lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
# Testing the unknown token
lowerCAmelCase_ : Union[str, Any] = tokens + [rust_tokenizer.unk_token]
lowerCAmelCase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ,*lowerCAmelCase__ : List[str] ,**lowerCAmelCase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any=15 ) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ ,**lowerCAmelCase__ )
# Simple input
lowerCAmelCase_ : int = "This is a simple input"
lowerCAmelCase_ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : str = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Simple input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(lowerCAmelCase__ ,tokenizer_r.encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" )
# Pair input
self.assertRaises(
lowerCAmelCase__ ,tokenizer_r.batch_encode_plus ,lowerCAmelCase__ ,max_length=lowerCAmelCase__ ,padding="max_length" ,)
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token="<pad>" )
# Simple input
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowerCAmelCase_ : Any = ("This is a simple input", "This is a pair")
lowerCAmelCase_ : List[str] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowerCAmelCase_ : Dict = tokenizer.pad_token_id
lowerCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase__ ,padding="max_length" ,max_length=30 ,return_tensors="np" )
lowerCAmelCase_ : Tuple = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
lowerCAmelCase_ : Any = tokenizer(*lowerCAmelCase__ ,padding="max_length" ,max_length=60 ,return_tensors="np" )
lowerCAmelCase_ : Optional[int] = tokenizer(lowerCAmelCase__ ,padding=lowerCAmelCase__ ,truncate=lowerCAmelCase__ ,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] ,30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] ,33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] ,60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] ,52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "$$$"
lowerCAmelCase_ : List[str] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=lowerCAmelCase__ ,add_bos_token=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = "This is a simple input"
lowerCAmelCase_ : Union[str, Any] = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ : int = tokenizer.bos_token_id
lowerCAmelCase_ : List[Any] = tokenizer(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = tokenizer(lowerCAmelCase__ )
self.assertEqual(out_s.input_ids[0] ,lowerCAmelCase__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowerCAmelCase_ : List[str] = tokenizer.decode(out_s.input_ids )
lowerCAmelCase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] ,lowerCAmelCase__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
lowerCAmelCase_ : str = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
lowerCAmelCase_ : int = "\nif len_a > len_b: result = a\nelse: result = b"
lowerCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase__ )
lowerCAmelCase_ : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
lowerCAmelCase_ : Union[str, Any] = tokenizer.decode(lowerCAmelCase__ ,truncate_before_pattern=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
pass
| 683 | 1 |
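The slow test above exercises `truncate_before_pattern`, which cuts a decoded completion at the first match of any of the given regexes. A minimal usage sketch, again assuming the `Salesforce/codegen-350M-mono` checkpoint is reachable:

# Decode-time truncation as exercised in the slow test above.
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
text = "\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n# trailing comment"
ids = tokenizer.encode(text)
# Stop at a comment marker, the EOS token, or a run of blank lines.
patterns = ["^#", "<\\|endoftext\\|>", "\n\n\n"]
print(tokenizer.decode(ids, truncate_before_pattern=patterns))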
from typing import Any
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validation(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
# Creates data structures and fill initial step
lowerCAmelCase_ : dict = {}
lowerCAmelCase_ : dict = {}
for state in states_space:
lowerCAmelCase_ : List[Any] = observations_space[0]
lowerCAmelCase_ : int = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCAmelCase_ : Dict = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(snake_case__)):
lowerCAmelCase_ : List[Any] = observations_space[o]
lowerCAmelCase_ : Optional[Any] = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCAmelCase_ : List[Any] = ""
lowerCAmelCase_ : Tuple = -1
for k_state in states_space:
lowerCAmelCase_ : int = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Optional[Any] = k_state
# Update probabilities and pointers dicts
lowerCAmelCase_ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCAmelCase_ : Any = arg_max
# The final observation
lowerCAmelCase_ : List[Any] = observations_space[len(snake_case__) - 1]
# argmax for given final observation
lowerCAmelCase_ : List[str] = ""
lowerCAmelCase_ : List[str] = -1
for k_state in states_space:
lowerCAmelCase_ : List[str] = probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCAmelCase_ : List[str] = probability
lowerCAmelCase_ : Tuple = k_state
lowerCAmelCase_ : str = arg_max
# Process pointers backwards
lowerCAmelCase_ : int = last_state
lowerCAmelCase_ : int = []
for o in range(len(snake_case__) - 1 , -1 , -1):
result.append(snake_case__)
lowerCAmelCase_ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
_validate_not_empty(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , )
_validate_lists(snake_case__ , snake_case__)
_validate_dicts(
snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
]):
raise ValueError("There's an empty parameter")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_list(snake_case__ , "observations_space")
_validate_list(snake_case__ , "states_space")
def UpperCamelCase ( snake_case__ , snake_case__):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list'''
raise ValueError(snake_case__)
else:
for x in _object:
if not isinstance(snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , ):
_validate_dict(snake_case__ , "initial_probabilities" , snake_case__)
_validate_nested_dict(snake_case__ , "transition_probabilities")
_validate_nested_dict(snake_case__ , "emission_probabilities")
def UpperCamelCase ( snake_case__ , snake_case__):
_validate_dict(_object , snake_case__ , snake_case__)
for x in _object.values():
_validate_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = False):
if not isinstance(_object , snake_case__):
lowerCAmelCase_ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object):
lowerCAmelCase_ : Dict = F'''{var_name} all keys must be strings'''
raise ValueError(snake_case__)
if not all(isinstance(snake_case__ , snake_case__) for x in _object.values()):
lowerCAmelCase_ : Union[str, Any] = "nested dictionary " if nested else ""
lowerCAmelCase_ : Any = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(snake_case__)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 683 |
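The code above is the Viterbi algorithm for hidden Markov models, with heavy input validation; since the dump's renaming hides the recurrence, here is a compact self-contained run on the classic healthy/fever example (all probabilities below are illustrative, not taken from this dump):

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

def viterbi(obs, states, start_p, trans_p, emit_p):
    # prob[(state, t)] = best path probability ending in `state` at step t
    prob = {(s, 0): start_p[s] * emit_p[s][obs[0]] for s in states}
    back = {}
    for t in range(1, len(obs)):
        for s in states:
            best_prev = max(states, key=lambda k: prob[(k, t - 1)] * trans_p[k][s])
            prob[(s, t)] = prob[(best_prev, t - 1)] * trans_p[best_prev][s] * emit_p[s][obs[t]]
            back[(s, t)] = best_prev
    last = max(states, key=lambda s: prob[(s, len(obs) - 1)])
    path = [last]
    for t in range(len(obs) - 1, 0, -1):
        path.append(back[(path[-1], t)])
    return path[::-1]

print(viterbi(observations, states, start_p, trans_p, emit_p))
# ['Healthy', 'Healthy', 'Fever']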
from __future__ import annotations
from random import random
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[int] ,lowerCAmelCase__ : int | None = None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = value
lowerCAmelCase_ : Any = random()
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
def __repr__( self : Any ) -> str:
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 )
def __str__( self : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = str(self.value ) + " "
lowerCAmelCase_ : List[Any] = str(self.left or "" )
lowerCAmelCase_ : Union[str, Any] = str(self.right or "" )
return value + left + right
def UpperCamelCase ( snake_case__ , snake_case__):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
lowerCAmelCase_ , lowerCAmelCase_ : Any = split(root.left , snake_case__)
return left, root
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = split(root.right , snake_case__)
return root, right
def UpperCamelCase ( snake_case__ , snake_case__):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
lowerCAmelCase_ : Dict = merge(left.right , snake_case__)
return left
else:
lowerCAmelCase_ : List[str] = merge(snake_case__ , right.left)
return right
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : List[Any] = Node(snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = split(snake_case__ , snake_case__)
return merge(merge(snake_case__ , snake_case__) , snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = split(snake_case__ , value - 1)
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = split(snake_case__ , snake_case__)
return merge(snake_case__ , snake_case__)
def UpperCamelCase ( snake_case__):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=",")
inorder(root.right)
def UpperCamelCase ( snake_case__ , snake_case__):
for arg in args.split():
if arg[0] == "+":
lowerCAmelCase_ : List[str] = insert(snake_case__ , int(arg[1:]))
elif arg[0] == "-":
lowerCAmelCase_ : Optional[int] = erase(snake_case__ , int(arg[1:]))
else:
print("Unknown command")
return root
def UpperCamelCase ( ):
lowerCAmelCase_ : str = None
print(
"Enter numbers to create a tree, '+ value' to add a value to the treap, "
"'- value' to erase all nodes with that value, or 'q' to quit. ")
lowerCAmelCase_ : str = input()
while args != "q":
lowerCAmelCase_ : int = interact_treap(snake_case__ , snake_case__)
print(snake_case__)
lowerCAmelCase_ : str = input()
print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
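# Editor's note: every function in this record was obfuscated to the same name
# ("UpperCamelCase"), so later definitions shadow earlier ones and the record
# cannot run as written. A minimal standalone sketch of the same split/merge
# treap operations follows; all names are illustrative, not the original API.
from random import random

class TreapNode:
    def __init__(self, value):
        self.value = value
        self.prior = random()  # heap priority; smaller priority stays on top
        self.left = None
        self.right = None

def treap_split(root, value):
    # Split into (keys < value, keys >= value) subtreaps.
    if root is None:
        return None, None
    if root.value < value:
        left, right = treap_split(root.right, value)
        root.right = left
        return root, right
    left, right = treap_split(root.left, value)
    root.left = right
    return left, root

def treap_merge(left, right):
    # Merge two treaps where every key in `left` precedes every key in `right`.
    if left is None or right is None:
        return left or right
    if left.prior < right.prior:
        left.right = treap_merge(left.right, right)
        return left
    right.left = treap_merge(left, right.left)
    return right

def treap_insert(root, value):
    left, right = treap_split(root, value)
    return treap_merge(treap_merge(left, TreapNode(value)), right)

def treap_inorder(root, out):
    if root:
        treap_inorder(root.left, out)
        out.append(root.value)
        treap_inorder(root.right, out)

root = None
for v in [5, 2, 9, 1]:
    root = treap_insert(root, v)
keys = []
treap_inorder(root, keys)
print(keys)  # [1, 2, 5, 9]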
| 683 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __snake_case ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableDiffusionPanoramaPipeline
UpperCamelCase_ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def UpperCAmelCase_ ( self : List[str] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ : int = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") ,up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") ,cross_attention_dim=32 ,)
lowerCAmelCase_ : List[str] = DDIMScheduler()
torch.manual_seed(0 )
lowerCAmelCase_ : Any = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,)
torch.manual_seed(0 )
lowerCAmelCase_ : int = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=10_00 ,)
lowerCAmelCase_ : int = CLIPTextModel(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase_ : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Any=0 ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : str = torch.manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : int = {
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : List[Any] = self.get_dummy_components()
lowerCAmelCase_ : Optional[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : int = sd_pipe(**lowerCAmelCase__ ).images
lowerCAmelCase_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Optional[int] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
'''simple docstring'''
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.2_5e-3 )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Optional[int] = self.get_dummy_components()
lowerCAmelCase_ : Dict = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = "french fries"
lowerCAmelCase_ : Optional[int] = sd_pipe(**lowerCAmelCase__ ,negative_prompt=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = output.images
lowerCAmelCase_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Union[str, Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Dict = self.get_dummy_components()
lowerCAmelCase_ : List[str] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : str = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = sd_pipe(**lowerCAmelCase__ ,view_batch_size=2 )
lowerCAmelCase_ : Any = output.images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : Dict = self.get_dummy_components()
lowerCAmelCase_ : Optional[int] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" )
lowerCAmelCase_ : Union[str, Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
lowerCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Dict = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ : int = self.get_dummy_components()
lowerCAmelCase_ : str = PNDMScheduler(
beta_start=0.00_085 ,beta_end=0.012 ,beta_schedule="scaled_linear" ,skip_prk_steps=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[int] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = self.get_dummy_inputs(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = sd_pipe(**lowerCAmelCase__ ).images
lowerCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ : Any = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Any=0 ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Any = torch.manual_seed(lowerCAmelCase__ )
lowerCAmelCase_ : str = {
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : str = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase_ : str = DDIMScheduler.from_pretrained(lowerCAmelCase__ ,subfolder="scheduler" )
lowerCAmelCase_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : Optional[int] = self.get_inputs()
lowerCAmelCase_ : Any = pipe(**lowerCAmelCase__ ).images
lowerCAmelCase_ : str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCAmelCase_ : Union[str, Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def UpperCAmelCase_ ( self : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : str = StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" ,safety_checker=lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : Optional[int] = self.get_inputs()
lowerCAmelCase_ : Union[str, Any] = pipe(**lowerCAmelCase__ ).images
lowerCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 20_48, 3)
lowerCAmelCase_ : Tuple = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = 0
def callback_fn(lowerCAmelCase__ : int ,lowerCAmelCase__ : int ,lowerCAmelCase__ : torch.FloatTensor ) -> None:
lowerCAmelCase_ : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase_ : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCAmelCase_ : Union[str, Any] = latents[0, -3:, -3:, -1]
lowerCAmelCase_ : Dict = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCAmelCase_ : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
lowerCAmelCase_ : Tuple = latents[0, -3:, -3:, -1]
lowerCAmelCase_ : Optional[int] = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : List[Any] = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ ,subfolder="scheduler" )
lowerCAmelCase_ : Optional[int] = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ )
lowerCAmelCase_ : List[str] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
lowerCAmelCase_ : Any = self.get_inputs()
pipe(**lowerCAmelCase__ ,callback=lowerCAmelCase__ ,callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ : int = "stabilityai/stable-diffusion-2-base"
lowerCAmelCase_ : List[Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ ,subfolder="scheduler" )
lowerCAmelCase_ : Any = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ ,scheduler=lowerCAmelCase__ ,safety_checker=lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ : Any = self.get_inputs()
lowerCAmelCase_ : Optional[Any] = pipe(**lowerCAmelCase__ )
lowerCAmelCase_ : str = torch.cuda.max_memory_allocated()
# make sure that less than 5.5 GB is allocated
assert mem_bytes < 5.5 * 10**9
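# Editor's note: every numeric check in the tests above follows one pattern --
# compare a 3x3 corner slice of the output image against hard-coded reference
# values. A minimal standalone sketch of that pattern (the arrays below are
# placeholders, not real pipeline outputs):
import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]                # bottom-right 3x3 of the last channel
expected_slice = np.zeros(9, dtype=np.float32)      # reference values from a trusted run
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2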
| 683 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_lowercase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
_lowercase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
_lowercase = {f"funnel-transformer/{name}": 512 for name in _model_names}
_lowercase = {f"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = FunnelTokenizer
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = 2
def __init__( self : Optional[Any] ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : Optional[int]=None ,lowerCAmelCase__ : Optional[Any]=True ,lowerCAmelCase__ : List[str]="<unk>" ,lowerCAmelCase__ : int="<sep>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : List[str]="<cls>" ,lowerCAmelCase__ : Optional[int]="<mask>" ,lowerCAmelCase__ : Union[str, Any]="<s>" ,lowerCAmelCase__ : List[str]="</s>" ,lowerCAmelCase__ : Optional[int]=True ,lowerCAmelCase__ : Tuple=True ,lowerCAmelCase__ : Any=None ,lowerCAmelCase__ : List[Any]="##" ,**lowerCAmelCase__ : int ,) -> List[Any]:
'''simple docstring'''
super().__init__(
lowerCAmelCase__ ,tokenizer_file=lowerCAmelCase__ ,do_lower_case=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,clean_text=lowerCAmelCase__ ,tokenize_chinese_chars=lowerCAmelCase__ ,strip_accents=lowerCAmelCase__ ,wordpieces_prefix=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" ,lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get("strip_accents" ,lowerCAmelCase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" ,lowerCAmelCase__ ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[int] = getattr(lowerCAmelCase__ ,normalizer_state.pop("type" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : List[str] = strip_accents
lowerCAmelCase_ : Any = tokenize_chinese_chars
lowerCAmelCase_ : List[Any] = normalizer_class(**lowerCAmelCase__ )
lowerCAmelCase_ : int = do_lower_case
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ,lowerCAmelCase__ : str=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : str = self._tokenizer.model.save(lowerCAmelCase__ ,name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
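# Editor's note: a hedged usage sketch of the class above via the public
# `transformers` API. It assumes network access to download the
# "funnel-transformer/small" checkpoint, and the printed ids are not verified
# here.
from transformers import FunnelTokenizerFast

tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
enc = tok("Hello world!", "Second segment.")
print(enc["input_ids"])       # <cls> ... <sep> ... <sep>
print(enc["token_type_ids"])  # the <cls> slot uses cls_token_type_id (2), per the method above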
| 683 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
_lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
_lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2048,
}
def UpperCamelCase ( snake_case__ , snake_case__):
with open(snake_case__ , "r" , encoding="utf-8") as f:
lowerCAmelCase_ : Dict = json.loads(f.read())
lowerCAmelCase_ : Tuple = collections.OrderedDict()
lowerCAmelCase_ : Optional[int] = collections.OrderedDict()
lowerCAmelCase_ : List[str] = collections.OrderedDict()
with open(snake_case__ , "r" , encoding="utf-8") as f:
lowerCAmelCase_ : int = f.readlines()
lowerCAmelCase_ : List[str] = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
for idx, b in enumerate(snake_case__):
lowerCAmelCase_ : Optional[int] = b
lowerCAmelCase_ : List[Any] = idx
for wd in b:
lowerCAmelCase_ : Dict = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : List[str] ,lowerCAmelCase__ : Union[str, Any] ,lowerCAmelCase__ : Tuple ,lowerCAmelCase__ : List[str]="<|endoftext|>" ,lowerCAmelCase__ : Optional[Any]="<|endoftext|>" ,lowerCAmelCase__ : List[Any]="<|startoftext|>" ,lowerCAmelCase__ : Optional[Any]="<|endoftext|>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : Union[str, Any] ,) -> List[Any]:
'''simple docstring'''
super().__init__(
unk_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,do_clean_text=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
f'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(
f'''Can\'t find an emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
" pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
lowerCAmelCase_ : Optional[int] = do_clean_text
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = load_vocab_and_emoji(lowerCAmelCase__ ,lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = SubWordJapaneseTokenizer(
vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> str:
'''simple docstring'''
return len(self.raw_vocab )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
'''simple docstring'''
return dict(self.raw_vocab ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : Any ) -> Any:
'''simple docstring'''
return self.subword_tokenizer.tokenize(lowerCAmelCase__ ,clean=self.do_clean_text )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
return self.vocab.get(lowerCAmelCase__ ,self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ : str = "".join(lowerCAmelCase__ ).strip()
return out_string
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : "Conversation" ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase__ ,add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] )
if len(lowerCAmelCase__ ) > self.model_max_length:
lowerCAmelCase_ : str = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = 0
if os.path.isdir(lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Optional[int] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
lowerCAmelCase_ : List[str] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
)
lowerCAmelCase_ : Optional[Any] = (
(filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
)
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowerCAmelCase_ : List[Any] = token_index
writer.write(",".join(lowerCAmelCase__ ) + "\n" )
index += 1
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
json.dump(self.emoji ,lowerCAmelCase__ )
return vocab_file, emoji_file
class __snake_case ( snake_case__ ):
"""simple docstring"""
def __init__( self : Dict ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : str = vocab # same as swe
lowerCAmelCase_ : List[str] = ids_to_tokens # same as bpe
lowerCAmelCase_ : List[str] = emoji
lowerCAmelCase_ : Union[str, Any] = np.max([len(lowerCAmelCase__ ) for w in self.vocab.keys()] )
lowerCAmelCase_ : List[Any] = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
lowerCAmelCase_ : List[str] = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
lowerCAmelCase_ : List[str] = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
lowerCAmelCase_ : Optional[int] = re.compile(
R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase_ : List[str] = re.compile(
R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
lowerCAmelCase_ : str = re.compile(
R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
lowerCAmelCase_ : List[str] = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
lowerCAmelCase_ : int = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
lowerCAmelCase_ : int = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCAmelCase_ ( self : str ,lowerCAmelCase__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = self.content_repattera.sub("<URL>" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = self.content_repattera.sub("<EMAIL>" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = self.content_repattera.sub("<TEL>" ,lowerCAmelCase__ )
lowerCAmelCase_ : Dict = self.content_repattera.sub("<DATE>" ,lowerCAmelCase__ )
lowerCAmelCase_ : int = self.content_repattera.sub("<DATE>" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = self.content_repattera.sub("<PRICE>" ,lowerCAmelCase__ )
lowerCAmelCase_ : Any = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
lowerCAmelCase_ : Tuple = content.replace("<BLOCK><BLOCK>" ,"<BLOCK>" )
return content
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Union[str, Any]=False ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = text.replace(" " ,"<SP>" )
lowerCAmelCase_ : int = text.replace(" " ,"<SP>" )
lowerCAmelCase_ : int = text.replace("\r\n" ,"<BR>" )
lowerCAmelCase_ : Union[str, Any] = text.replace("\n" ,"<BR>" )
lowerCAmelCase_ : int = text.replace("\r" ,"<BR>" )
lowerCAmelCase_ : Union[str, Any] = text.replace("\t" ,"<TAB>" )
lowerCAmelCase_ : Optional[Any] = text.replace("—" ,"ー" )
lowerCAmelCase_ : List[Any] = text.replace("−" ,"ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
lowerCAmelCase_ : str = text.replace(lowerCAmelCase__ ,lowerCAmelCase__ )
if clean:
lowerCAmelCase_ : Optional[Any] = self.clean_text(lowerCAmelCase__ )
def check_simbol(lowerCAmelCase__ : List[Any] ):
lowerCAmelCase_ : List[Any] = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 2:
lowerCAmelCase_ : Union[str, Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0Xc2a1 and c <= 0Xc2bf)
or (c >= 0Xc780 and c <= 0Xc783)
or (c >= 0Xcab9 and c <= 0Xcbbf)
or (c >= 0Xcc80 and c <= 0Xcda2)
):
return True
return False
def checkuae(lowerCAmelCase__ : int ):
lowerCAmelCase_ : Optional[int] = x.encode()
if len(lowerCAmelCase__ ) == 1 and len(lowerCAmelCase__ ) == 3:
lowerCAmelCase_ : Optional[Any] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0Xe28080 and c <= 0Xe2b07f:
return True
return False
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : List[str] = []
while pos < len(lowerCAmelCase__ ):
lowerCAmelCase_ : Dict = min(len(lowerCAmelCase__ ) ,pos + self.maxlen + 1 ) if text[pos] == "<" else pos + 3
lowerCAmelCase_ : Optional[int] = [] # (token_id, token, pos)
for e in range(lowerCAmelCase__ ,lowerCAmelCase__ ,-1 ):
lowerCAmelCase_ : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(lowerCAmelCase__ ) > 2:
lowerCAmelCase_ : List[str] = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(lowerCAmelCase__ ) > 0:
# the smallest token_id is adopted
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = sorted(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : x[0] )[0]
result.append(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = e
else:
lowerCAmelCase_ : Union[str, Any] = pos + 1
lowerCAmelCase_ : List[str] = text[pos:end]
if check_simbol(lowerCAmelCase__ ):
result.append("<KIGOU>" )
elif checkuae(lowerCAmelCase__ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
lowerCAmelCase_ : List[str] = end
return result
def UpperCAmelCase_ ( self : int ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Tuple="\n" ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : Tuple = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" ,errors="replace" ) )
lowerCAmelCase_ : List[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(lowerCAmelCase__ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
words.append(bytearray(lowerCAmelCase__ ).decode("utf-8" ,errors="replace" ) )
lowerCAmelCase_ : Any = "".join(lowerCAmelCase__ )
return text
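# Editor's note: a hedged usage sketch of the tokenizer above via the public
# `transformers` API. It assumes network access to download the
# "abeja/gpt-neox-japanese-2.7b" checkpoint; the exact ids are not verified
# here.
from transformers import GPTNeoXJapaneseTokenizer

tok = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tok.encode("こんにちは、世界。")
print(ids)              # vocabulary ids; URLs, dates, emoji, etc. map to special tokens
print(tok.decode(ids))  # round-trips back to the cleaned text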
| 683 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowercase = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def UpperCamelCase ( snake_case__):
config.addinivalue_line(
"markers" , "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested")
config.addinivalue_line(
"markers" , "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested")
config.addinivalue_line("markers" , "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers" , "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers" , "accelerate_tests: mark test that require accelerate")
config.addinivalue_line("markers" , "tool_tests: mark the tool tests that are run on their specific schedule")
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case__)
def UpperCamelCase ( snake_case__):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase_ : int = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(snake_case__ , id=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__):
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowerCAmelCase_ : List[Any] = 0
# Doctest custom flag to ignore output.
_lowercase = doctest.register_optionflag('''IGNORE_RESULT''')
_lowercase = doctest.OutputChecker
class __snake_case ( snake_case__ ):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : int ,lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Any:
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ )
_lowercase = CustomOutputChecker
_lowercase = HfDoctestModule
_lowercase = HfDocTestParser
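# Editor's note: a minimal sketch of how the IGNORE_RESULT flag registered
# above is meant to be used. Doctest directives look the flag up by its
# registered string name, so the directive works even though the record binds
# the flag value to an obfuscated variable; the function below is illustrative.
def touch_cache():
    """
    >>> touch_cache()  # doctest: +IGNORE_RESULT
    'this expected output is never compared'
    """
    return 42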
| 683 | 1 |
_lowercase = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowercase = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
assert len(str(snake_case__)) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
lowerCAmelCase_ : int = year // 1_00
lowerCAmelCase_ : Any = (5 * (century % 4) + 2) % 7
lowerCAmelCase_ : int = year % 1_00
lowerCAmelCase_ : Tuple = centurian % 12
lowerCAmelCase_ : List[str] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowerCAmelCase_ : Union[str, Any] = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_00) != 0)  # century years are leap only when divisible by 400
else DOOMSDAY_LEAP[month - 1]
)
lowerCAmelCase_ : str = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
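# Editor's note: a standalone worked example of the same doomsday computation
# with readable names (all illustrative); it applies the corrected leap rule
# (century years are leap only when divisible by 400).
def day_of_week(year: int, month: int, day: int) -> str:
    doomsday_leap = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    doomsday_not_leap = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
    names = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    century_anchor = (5 * ((year // 100) % 4) + 2) % 7
    yy = year % 100
    # Weekday that 4/4, 6/6, 8/8, 10/10, 12/12 (the "doomsdays") fall on this year.
    year_doomsday = (yy // 12 + yy % 12 + (yy % 12) // 4 + century_anchor) % 7
    is_leap = year % 4 == 0 and (yy != 0 or year % 400 == 0)
    month_doomsday_date = (doomsday_leap if is_leap else doomsday_not_leap)[month - 1]
    return names[(year_doomsday + day - month_doomsday_date) % 7]

print(day_of_week(2020, 10, 24))  # Saturday
print(day_of_week(2000, 1, 1))    # Saturday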
| 683 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = list(snake_case__)
lowerCAmelCase_ : Tuple = list(snake_case__)
lowerCAmelCase_ : List[str] = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count += 1
lowerCAmelCase_ : Dict = "_"
if count > 1:
return False
else:
return "".join(snake_case__)
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Union[str, Any] = []
while True:
lowerCAmelCase_ : Tuple = ["$"] * len(snake_case__)
lowerCAmelCase_ : Tuple = []
for i in range(len(snake_case__)):
for j in range(i + 1 , len(snake_case__)):
lowerCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j])
if k is False:
lowerCAmelCase_ : str = "*"
lowerCAmelCase_ : Tuple = "*"
temp.append("X")
for i in range(len(snake_case__)):
if checka[i] == "$":
pi.append(binary[i])
if len(snake_case__) == 0:
return pi
lowerCAmelCase_ : List[Any] = list(set(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = []
for minterm in minterms:
lowerCAmelCase_ : Dict = ""
for _ in range(snake_case__):
lowerCAmelCase_ : Dict = str(minterm % 2) + string
minterm //= 2
temp.append(snake_case__)
return temp
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = list(snake_case__)
lowerCAmelCase_ : Dict = list(snake_case__)
lowerCAmelCase_ : Dict = 0
for i in range(len(snake_case__)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Dict = [0] * len(snake_case__)
for i in range(len(chart[0])):
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : int = -1
for j in range(len(snake_case__)):
if chart[j][i] == 1:
count += 1
lowerCAmelCase_ : Optional[int] = j
if count == 1:
lowerCAmelCase_ : Union[str, Any] = 1
for i in range(len(snake_case__)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(snake_case__)):
lowerCAmelCase_ : Tuple = 0
temp.append(prime_implicants[i])
while True:
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Dict = -1
lowerCAmelCase_ : Tuple = 0
for i in range(len(snake_case__)):
lowerCAmelCase_ : Dict = chart[i].count(1)
if count_n > max_n:
lowerCAmelCase_ : Optional[int] = count_n
lowerCAmelCase_ : Optional[Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(snake_case__)):
lowerCAmelCase_ : Any = 0
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = [[0 for x in range(len(snake_case__))] for x in range(len(snake_case__))]
for i in range(len(snake_case__)):
lowerCAmelCase_ : Optional[Any] = prime_implicants[i].count("_")
for j in range(len(snake_case__)):
if is_for_table(prime_implicants[i] , binary[j] , snake_case__):
lowerCAmelCase_ : Dict = 1
return chart
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = int(input("Enter the no. of variables\n"))
lowerCAmelCase_ : Tuple = [
float(snake_case__)
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n").split()
]
lowerCAmelCase_ : Any = decimal_to_binary(snake_case__ , snake_case__)
lowerCAmelCase_ : Dict = check(snake_case__)
print("Prime Implicants are:")
print(snake_case__)
lowerCAmelCase_ : int = prime_implicant_chart(snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = selection(snake_case__ , snake_case__)
print("Essential Prime Implicants are:")
print(snake_case__)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
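# Editor's note: the record's functions all share one obfuscated name and
# shadow each other, so here is a tiny standalone illustration of the core
# Quine-McCluskey step above -- merging implicants that differ in exactly one
# bit (the minterms form an illustrative two-variable example):
from typing import Optional

def combine(a: str, b: str) -> Optional[str]:
    # Merge two implicants differing in exactly one position, else None.
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None
    return a[: diff[0]] + "_" + a[diff[0] + 1 :]

minterms = ["00", "01", "11"]  # f(A, B) is 1 on these inputs
pairs = [(x, y) for i, x in enumerate(minterms) for y in minterms[i + 1 :]]
merged = [m for x, y in pairs if (m := combine(x, y)) is not None]
print(merged)  # ['0_', '_1']  ->  f = A' + B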
| 683 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_lowercase = (720, 1280) # Height, Width
_lowercase = (0.4, 0.6) # if height or width is lower than this scale, drop it.
_lowercase = 1 / 100
_lowercase = ''''''
_lowercase = ''''''
_lowercase = ''''''
_lowercase = 250
def UpperCamelCase ( ):
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = get_dataset(snake_case__ , snake_case__)
for index in range(snake_case__):
lowerCAmelCase_ : Optional[int] = random.sample(range(len(snake_case__)) , 4)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = update_image_and_anno(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , filter_scale=snake_case__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase_ : Tuple = random_chars(32)
lowerCAmelCase_ : int = path.split(os.sep)[-1].rsplit("." , 1)[0]
lowerCAmelCase_ : Union[str, Any] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , snake_case__ , [cva.IMWRITE_JPEG_QUALITY, 85])
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''')
lowerCAmelCase_ : int = []
for anno in new_annos:
lowerCAmelCase_ : Optional[int] = anno[3] - anno[1]
lowerCAmelCase_ : Optional[int] = anno[4] - anno[2]
lowerCAmelCase_ : Any = anno[1] + width / 2
lowerCAmelCase_ : List[str] = anno[2] + height / 2
lowerCAmelCase_ : str = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(snake_case__)
with open(F'''{file_root}.txt''' , "w") as outfile:
outfile.write("\n".join(line for line in annos_list))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : str = []
lowerCAmelCase_ : int = []
for label_file in glob.glob(os.path.join(snake_case__ , "*.txt")):
lowerCAmelCase_ : Union[str, Any] = label_file.split(os.sep)[-1].rsplit("." , 1)[0]
with open(snake_case__) as in_file:
lowerCAmelCase_ : int = in_file.readlines()
lowerCAmelCase_ : Optional[int] = os.path.join(snake_case__ , F'''{label_name}.jpg''')
lowerCAmelCase_ : str = []
for obj_list in obj_lists:
lowerCAmelCase_ : str = obj_list.rstrip("\n").split(" ")
lowerCAmelCase_ : Union[str, Any] = float(obj[1]) - float(obj[3]) / 2
lowerCAmelCase_ : Any = float(obj[2]) - float(obj[4]) / 2
lowerCAmelCase_ : Tuple = float(obj[1]) + float(obj[3]) / 2
lowerCAmelCase_ : List[str] = float(obj[2]) + float(obj[4]) / 2
boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
if not boxes:
continue
img_paths.append(snake_case__)
labels.append(snake_case__)
return img_paths, labels
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = 0.0 , ):
lowerCAmelCase_ : Optional[Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta)
lowerCAmelCase_ : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ : List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ : int = int(scale_x * output_size[1])
lowerCAmelCase_ : Optional[Any] = int(scale_y * output_size[0])
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : List[Any] = []
for i, index in enumerate(snake_case__):
lowerCAmelCase_ : Optional[int] = all_img_list[index]
path_list.append(snake_case__)
lowerCAmelCase_ : str = all_annos[index]
lowerCAmelCase_ : List[str] = cva.imread(snake_case__)
if i == 0: # top-left
lowerCAmelCase_ : List[str] = cva.resize(snake_case__ , (divid_point_x, divid_point_y))
lowerCAmelCase_ : Optional[int] = img
for bbox in img_annos:
lowerCAmelCase_ : Optional[Any] = bbox[1] * scale_x
lowerCAmelCase_ : List[Any] = bbox[2] * scale_y
lowerCAmelCase_ : Optional[int] = bbox[3] * scale_x
lowerCAmelCase_ : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
elif i == 1: # top-right
lowerCAmelCase_ : Union[str, Any] = cva.resize(snake_case__ , (output_size[1] - divid_point_x, divid_point_y))
lowerCAmelCase_ : Any = img
for bbox in img_annos:
lowerCAmelCase_ : int = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ : Tuple = bbox[2] * scale_y
lowerCAmelCase_ : str = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ : Optional[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
elif i == 2: # bottom-left
lowerCAmelCase_ : Union[str, Any] = cva.resize(snake_case__ , (divid_point_x, output_size[0] - divid_point_y))
lowerCAmelCase_ : List[str] = img
for bbox in img_annos:
lowerCAmelCase_ : Union[str, Any] = bbox[1] * scale_x
lowerCAmelCase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ : List[Any] = bbox[3] * scale_x
lowerCAmelCase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
else: # bottom-right
lowerCAmelCase_ : List[str] = cva.resize(
snake_case__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
lowerCAmelCase_ : str = img
for bbox in img_annos:
lowerCAmelCase_ : Dict = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ : Any = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ : Tuple = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCAmelCase_ : str = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def UpperCamelCase ( snake_case__):
assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase_ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(snake_case__) for _ in range(snake_case__))
if __name__ == "__main__":
main()
print('''DONE ✅''')
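# Editor's note: a minimal standalone sketch of the YOLO bounding-box
# round-trip performed above (normalized center/width/height on disk, corner
# coordinates in memory); the sample box is illustrative.
import math

def yolo_to_corners(xc, yc, w, h):
    return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

def corners_to_yolo(xmin, ymin, xmax, ymax):
    return (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin

box = (0.5, 0.5, 0.2, 0.4)
round_trip = corners_to_yolo(*yolo_to_corners(*box))
assert all(math.isclose(a, b) for a, b in zip(round_trip, box))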
| 683 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_lowercase = logging.getLogger(__name__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ):
lowerCAmelCase_ : List[Any] = bnb_quantization_config.load_in_abit
lowerCAmelCase_ : Optional[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed.")
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed.")
lowerCAmelCase_ : List[str] = []
# custom device map
if isinstance(snake_case__ , snake_case__) and len(device_map.keys()) > 1:
lowerCAmelCase_ : Union[str, Any] = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase_ : Union[str, Any] = get_keys_to_not_convert(snake_case__)
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__)
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : int = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__)
# compatibility with peft
lowerCAmelCase_ : Optional[int] = load_in_abit
lowerCAmelCase_ : List[str] = load_in_abit
lowerCAmelCase_ : Optional[int] = get_parameter_device(snake_case__)
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager.")
lowerCAmelCase_ : Union[str, Any] = replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
# convert param to the right dtype
lowerCAmelCase_ : Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
param.to(torch.floataa)
if param.dtype != torch.floataa:
lowerCAmelCase_ : Optional[int] = name.replace(".weight" , "").replace(".bias" , "")
lowerCAmelCase_ : Optional[int] = getattr(snake_case__ , snake_case__ , snake_case__)
if param is not None:
param.to(torch.floataa)
elif torch.is_floating_point(snake_case__):
param.to(snake_case__)
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device())
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device())
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda.")
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
else:
with init_empty_weights():
lowerCAmelCase_ : str = replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__)
lowerCAmelCase_ : Optional[int] = get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase_ : Optional[Any] = True
lowerCAmelCase_ : Optional[int] = any(x in list(device_map.values()) for x in ["cpu", "disk"])
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__)
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None):
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase_ : Any = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization.")
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`.")
if isinstance(snake_case__ , snake_case__):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'.")
lowerCAmelCase_ : Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules)
})
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
})
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : Union[str, Any] = special_dtypes
lowerCAmelCase_ : Union[str, Any] = no_split_module_classes
lowerCAmelCase_ : Any = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase_ : Tuple = get_balanced_memory(
snake_case__ , low_zero=(device_map == "balanced_low_0") , max_memory=snake_case__ , **snake_case__ , )
lowerCAmelCase_ : Tuple = max_memory
lowerCAmelCase_ : Optional[Any] = infer_auto_device_map(snake_case__ , **snake_case__)
if isinstance(snake_case__ , snake_case__):
# check if don't have any quantized module on the cpu
lowerCAmelCase_ : Union[str, Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase_ : List[Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ")
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit")
del device_map_without_some_modules
return device_map
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
if modules_to_not_convert is None:
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = _replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__)
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug.")
return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None,
                        has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
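

# The next helper decides which modules must stay in full precision: tied
# parameters and the output head of models that define a `base_model_prefix`.
# The deep copy happens under `init_empty_weights`, so no real tensors are
# allocated while inspecting the architecture.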
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    # Check whether the model contains any 4-bit bitsandbytes linear layer
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace("weight", "SCB"), offload_folder,
                index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
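

# Illustrative usage only (assumes an already-instantiated torch model and a
# `BnbQuantizationConfig`; neither name is defined in this module):
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   model = replace_with_bnb_layers(
#       model, bnb_config, modules_to_not_convert=get_keys_to_not_convert(model))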
| 683 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
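
    # Mirrors the processor's resizing rule: the shorter image side is scaled to
    # `shortest_edge` while keeping the aspect ratio; for batched inputs the
    # expected size is the per-batch maximum height and width (the pad target).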
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 683 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]
    def __init__(self, feature_size=64, sampling_rate=48_000, hop_length=480, max_length_s=10,
                 fft_window_size=1024, padding_value=0.0, return_attention_mask=False,
                 frequency_min: float = 0, frequency_max: float = 14_000, top_db: int = None,
                 truncation: str = "fusion", padding: str = "repeatpad", **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value,
            return_attention_mask=return_attention_mask, **kwargs)
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate,
            norm=None, mel_scale="htk")
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins, num_mel_filters=feature_size,
            min_frequency=frequency_min, max_frequency=frequency_max, sampling_rate=sampling_rate,
            norm="slaney", mel_scale="slaney")
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        log_mel_spectrogram = spectrogram(
            waveform, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size,
            hop_length=self.hop_length, power=2.0, mel_filters=mel_filters, log_mel="dB")
        return log_mel_spectrogram.T
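
    # The "fusion" input stacks four mel views: a bilinear downsample of the
    # whole spectrogram plus three chunks sampled from the front, middle and
    # back thirds of the audio.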
    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False)
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion
    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than
                    # max_length + hop_length. In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f"data_truncating {truncation} not implemented")
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        # float64 mirrors the upstream CLAP implementation
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], List):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 683 | 1 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ], )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}])
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}])
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 683 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
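

# The driver below spawns one worker per element; neighbouring workers exchange
# values through pipes for a fixed number of rounds, which realizes odd-even
# transposition sort across processes.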
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ))
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ))
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ))
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 683 | 1 |
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # False marks the chains that arrive at 89
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
| 683 |
from typing import Any
def viterbi(observations_space, states_space, initial_probabilities,
            transition_probabilities, emission_probabilities):
    _validation(
        observations_space, states_space, initial_probabilities,
        transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
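

# Illustrative check (the classic healthy/fever HMM): with states
# ("Healthy", "Fever") and observations ("normal", "cold", "dizzy") under the
# usual textbook probabilities, viterbi(...) returns
# ["Healthy", "Healthy", "Fever"].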
def _validation(observations_space, states_space, initial_probabilities,
                transition_probabilities, emission_probabilities):
    _validate_not_empty(
        observations_space, states_space, initial_probabilities,
        transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)
def _validate_not_empty(observations_space, states_space, initial_probabilities,
                        transition_probabilities, emission_probabilities):
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")
def _validate_lists(observations_space, states_space):
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object, var_name):
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")
def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities):
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object, var_name):
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object, var_name, value_type, nested=False):
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 683 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 683 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = "microsoft/speecht5_tts"
    description = (
        "This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
        "text to read (in English) and returns a waveform object containing the sound."
    )
    name = "text_reader"
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan

    inputs = ["text"]
    outputs = ["audio"]
    def setup(self):
        if self.post_processor is None:
            self.post_processor = "microsoft/speecht5_hifigan"
        super().setup()
    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="pt", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("Datasets needs to be installed if not passing speaker embeddings.")
            embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["xvector"]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
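

# Minimal usage sketch (checkpoint names as declared above; downloads models):
#
#   tool = TextToSpeechTool()
#   tool.setup()
#   audio = tool("Hello world")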
| 683 | 1 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['''TF_CPP_MIN_LOG_LEVEL'''] = '''3'''  # Reduce the amount of console output from TF
print('''Python version:''', sys.version)
print('''transformers version:''', transformers.__version__)
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
print('''NCCL version:''', torch.cuda.nccl.version())
except ImportError:
print('''Torch version:''', None)
try:
import deepspeed
print('''DeepSpeed version:''', deepspeed.__version__)
except ImportError:
print('''DeepSpeed version:''', None)
try:
import tensorflow as tf
print('''TensorFlow version:''', tf.__version__)
print('''TF GPUs available:''', bool(tf.config.list_physical_devices('''GPU''')))
print('''Number of TF GPUs available:''', len(tf.config.list_physical_devices('''GPU''')))
except ImportError:
print('''TensorFlow version:''', None)
| 683 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout).")
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer.")
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).", )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory.")
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
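

# Token-level F1: precision and recall are computed over the multiset
# intersection of normalized gold and predicted tokens.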
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ])
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ])
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score", )
    pr_f1 = make_precision_recall_eval(
        f1_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score", )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)", )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
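

# The threshold search below sweeps candidates in order of increasing
# no-answer probability, tracking the score obtained if every question with a
# higher probability were answered with the empty string.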
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 | 1 |
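The fragment above ends with the evaluator's no-answer threshold sweep. As a reading aid, here is a minimal de-obfuscated sketch of that sweep; the identifier names are reconstructions, not part of the sample:

def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    # Baseline: predict "no answer" for every question.
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = best_score = num_no_ans
    best_thresh = 0.0
    # Sweep question ids in order of increasing no-answer probability.
    for qid in sorted(na_probs, key=lambda k: na_probs[k]):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            diff = -1 if preds[qid] else 0
        cur_score += diff
        if cur_score > best_score:
            best_score, best_thresh = cur_score, na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh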
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/timesformer''': '''https://huggingface.co/facebook/timesformer/resolve/main/config.json''',
}
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = 'timesformer'
def __init__( self : Optional[int] ,lowerCAmelCase__ : Union[str, Any]=2_24 ,lowerCAmelCase__ : Any=16 ,lowerCAmelCase__ : Tuple=3 ,lowerCAmelCase__ : List[str]=8 ,lowerCAmelCase__ : List[Any]=7_68 ,lowerCAmelCase__ : Optional[Any]=12 ,lowerCAmelCase__ : Optional[int]=12 ,lowerCAmelCase__ : Tuple=30_72 ,lowerCAmelCase__ : List[str]="gelu" ,lowerCAmelCase__ : Dict=0.0 ,lowerCAmelCase__ : List[Any]=0.0 ,lowerCAmelCase__ : List[Any]=0.02 ,lowerCAmelCase__ : Optional[Any]=1e-6 ,lowerCAmelCase__ : str=True ,lowerCAmelCase__ : Optional[int]="divided_space_time" ,lowerCAmelCase__ : List[Any]=0 ,**lowerCAmelCase__ : str ,) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
lowerCAmelCase_ : List[Any] = image_size
lowerCAmelCase_ : str = patch_size
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : List[str] = num_frames
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : Optional[int] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : Optional[int] = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Any = initializer_range
lowerCAmelCase_ : Optional[Any] = layer_norm_eps
lowerCAmelCase_ : Tuple = qkv_bias
lowerCAmelCase_ : str = attention_type
lowerCAmelCase_ : Any = drop_path_rate
| 683 |
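The sample above is a TimeSformer video-transformer configuration. A hedged usage sketch, assuming the class is exposed as transformers.TimesformerConfig as in the upstream library:

from transformers import TimesformerConfig

# 224x224 frames cut into 16x16 patches, 8 frames per clip,
# with the factorised "divided space-time" attention variant.
config = TimesformerConfig(
    image_size=224,
    patch_size=16,
    num_frames=8,
    attention_type="divided_space_time",
)
print(config.hidden_size)  # 768 by default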
from math import sqrt
def sum_of_divisors(n):
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit=10_000):
    return sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 683 | 1 |
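A quick sanity check of the snippet above, after the identifier repairs: 220 and 284 are the classic amicable pair, and this looks like Project Euler problem 21, whose expected result for solution(10_000) is 31626.

assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220
print(solution(10_000))  # 31626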
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
_lowercase = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)
_lowercase = None
def UpperCamelCase ( ):
lowerCAmelCase_ : Optional[Any] = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file.")
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions.")
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout).")
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer.")
parser.add_argument(
"--na-prob-thresh" , "-t" , type=snake_case__ , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=snake_case__ , help="Save precision-recall curves to directory.")
parser.add_argument("--verbose" , "-v" , action="store_true")
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : str = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : Dict = bool(qa["answers"]["text"])
return qid_to_has_ans
def UpperCamelCase ( snake_case__):
def remove_articles(snake_case__):
return ARTICLES_REGEX.sub(" " , snake_case__)
def white_space_fix(snake_case__):
return " ".join(text.split())
def remove_punc(snake_case__):
lowerCAmelCase_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(snake_case__):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(snake_case__))))
def UpperCamelCase ( snake_case__):
if not s:
return []
return normalize_answer(snake_case__).split()
def UpperCamelCase ( snake_case__ , snake_case__):
return int(normalize_answer(snake_case__) == normalize_answer(snake_case__))
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Optional[int] = get_tokens(snake_case__)
lowerCAmelCase_ : Union[str, Any] = get_tokens(snake_case__)
lowerCAmelCase_ : Any = collections.Counter(snake_case__) & collections.Counter(snake_case__)
lowerCAmelCase_ : Dict = sum(common.values())
if len(snake_case__) == 0 or len(snake_case__) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
lowerCAmelCase_ : List[Any] = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : int = 1.0 * num_same / len(snake_case__)
lowerCAmelCase_ : List[Any] = (2 * precision * recall) / (precision + recall)
return fa
def UpperCamelCase ( snake_case__ , snake_case__):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowerCAmelCase_ : int = qa["id"]
lowerCAmelCase_ : Any = [t for t in qa["answers"]["text"] if normalize_answer(snake_case__)]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowerCAmelCase_ : Any = [""]
if qid not in preds:
print(F'''Missing prediction for {qid}''')
continue
lowerCAmelCase_ : Tuple = preds[qid]
# Take max over all gold answers
lowerCAmelCase_ : Any = max(compute_exact(snake_case__ , snake_case__) for a in gold_answers)
lowerCAmelCase_ : Optional[Any] = max(compute_fa(snake_case__ , snake_case__) for a in gold_answers)
return exact_scores, fa_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = {}
for qid, s in scores.items():
lowerCAmelCase_ : List[Any] = na_probs[qid] > na_prob_thresh
if pred_na:
lowerCAmelCase_ : List[str] = float(not qid_to_has_ans[qid])
else:
lowerCAmelCase_ : Union[str, Any] = s
return new_scores
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__=None):
if not qid_list:
lowerCAmelCase_ : Any = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(fa_scores.values()) / total),
("total", total),
])
else:
lowerCAmelCase_ : Tuple = len(snake_case__)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
("total", total),
])
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__):
for k in new_eval:
lowerCAmelCase_ : Union[str, Any] = new_eval[k]
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
plt.step(snake_case__ , snake_case__ , color="b" , alpha=0.2 , where="post")
plt.fill_between(snake_case__ , snake_case__ , step="post" , alpha=0.2 , color="b")
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(snake_case__)
plt.savefig(snake_case__)
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None):
lowerCAmelCase_ : List[Any] = sorted(snake_case__ , key=lambda k: na_probs[k])
lowerCAmelCase_ : Dict = 0.0
lowerCAmelCase_ : int = 1.0
lowerCAmelCase_ : List[str] = 0.0
lowerCAmelCase_ : Tuple = [1.0]
lowerCAmelCase_ : Tuple = [0.0]
lowerCAmelCase_ : Dict = 0.0
for i, qid in enumerate(snake_case__):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowerCAmelCase_ : str = true_pos / float(i + 1)
lowerCAmelCase_ : Union[str, Any] = true_pos / float(snake_case__)
if i == len(snake_case__) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(snake_case__)
recalls.append(snake_case__)
if out_image:
plot_pr_curve(snake_case__ , snake_case__ , snake_case__ , snake_case__)
return {"ap": 100.0 * avg_prec}
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
if out_image_dir and not os.path.exists(snake_case__):
os.makedirs(snake_case__)
lowerCAmelCase_ : Any = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
lowerCAmelCase_ : Any = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_exact.png") , title="Precision-Recall curve for Exact Match score" , )
lowerCAmelCase_ : Dict = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_f1.png") , title="Precision-Recall curve for F1 score" , )
lowerCAmelCase_ : Dict = {k: float(snake_case__) for k, v in qid_to_has_ans.items()}
lowerCAmelCase_ : str = make_precision_recall_eval(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , out_image=os.path.join(snake_case__ , "pr_oracle.png") , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(snake_case__ , snake_case__ , "pr_exact")
merge_eval(snake_case__ , snake_case__ , "pr_f1")
merge_eval(snake_case__ , snake_case__ , "pr_oracle")
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
if not qid_list:
return
lowerCAmelCase_ : Optional[Any] = [na_probs[k] for k in qid_list]
lowerCAmelCase_ : Dict = np.ones_like(snake_case__) / float(len(snake_case__))
plt.hist(snake_case__ , weights=snake_case__ , bins=20 , range=(0.0, 1.0))
plt.xlabel("Model probability of no-answer")
plt.ylabel("Proportion of dataset")
plt.title(F'''Histogram of no-answer probability: {name}''')
plt.savefig(os.path.join(snake_case__ , F'''na_prob_hist_{name}.png'''))
plt.clf()
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ : Dict = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
lowerCAmelCase_ : str = num_no_ans
lowerCAmelCase_ : List[str] = cur_score
lowerCAmelCase_ : List[Any] = 0.0
lowerCAmelCase_ : str = sorted(snake_case__ , key=lambda k: na_probs[k])
for i, qid in enumerate(snake_case__):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowerCAmelCase_ : Union[str, Any] = scores[qid]
else:
if preds[qid]:
lowerCAmelCase_ : List[Any] = -1
else:
lowerCAmelCase_ : List[str] = 0
cur_score += diff
if cur_score > best_score:
lowerCAmelCase_ : Optional[Any] = cur_score
lowerCAmelCase_ : Optional[int] = na_probs[qid]
return 100.0 * best_score / len(snake_case__), best_thresh
def UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ , lowerCAmelCase_ : Dict = find_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__)
lowerCAmelCase_ : List[str] = best_exact
lowerCAmelCase_ : List[str] = exact_thresh
lowerCAmelCase_ : Any = best_fa
lowerCAmelCase_ : List[str] = fa_thresh
def UpperCamelCase ( ):
with open(OPTS.data_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
lowerCAmelCase_ : List[Any] = dataset_json["data"]
with open(OPTS.pred_file) as f:
lowerCAmelCase_ : int = json.load(snake_case__)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
lowerCAmelCase_ : Optional[int] = json.load(snake_case__)
else:
lowerCAmelCase_ : List[Any] = {k: 0.0 for k in preds}
lowerCAmelCase_ : Tuple = make_qid_to_has_ans(snake_case__) # maps qid to True/False
lowerCAmelCase_ : Any = [k for k, v in qid_to_has_ans.items() if v]
lowerCAmelCase_ : List[str] = [k for k, v in qid_to_has_ans.items() if not v]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_raw_scores(snake_case__ , snake_case__)
lowerCAmelCase_ : str = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Dict = apply_no_ans_threshold(snake_case__ , snake_case__ , snake_case__ , OPTS.na_prob_thresh)
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__)
if has_ans_qids:
lowerCAmelCase_ : str = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "HasAns")
if no_ans_qids:
lowerCAmelCase_ : Union[str, Any] = make_eval_dict(snake_case__ , snake_case__ , qid_list=snake_case__)
merge_eval(snake_case__ , snake_case__ , "NoAns")
if OPTS.na_prob_file:
find_all_best_thresh(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , OPTS.out_image_dir)
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "hasAns")
histogram_na_prob(snake_case__ , snake_case__ , OPTS.out_image_dir , "noAns")
if OPTS.out_file:
with open(OPTS.out_file , "w") as f:
json.dump(snake_case__ , snake_case__)
else:
print(json.dumps(snake_case__ , indent=2))
if __name__ == "__main__":
_lowercase = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('''Agg''')
import matplotlib.pyplot as plt
main()
| 683 |
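The script's F1 metric is bag-of-tokens overlap between the normalized gold and predicted answers. A self-contained illustration of the same computation (the normalization step is omitted for brevity):

import collections


def token_f1(gold, pred):
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)


print(token_f1("the cat sat", "cat sat down"))  # 0.666...: two shared tokens out of three on each side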
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 683 | 1 |
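The __init__ module above follows transformers' lazy-import pattern: heavy submodules are only imported when one of their names is first accessed. A minimal sketch of the idea, independent of the real _LazyModule internals:

import importlib


class LazyModule:
    """Resolve attribute lookups to submodule members on first access."""

    def __init__(self, name, import_structure):
        self._name = name
        # Invert {"submodule": ["ClassA", ...]} into {"ClassA": "submodule", ...}.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module(f".{self._class_to_module[attr]}", self._name)
        return getattr(module, attr)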
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowercase = logging.get_logger(__name__)
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = ['input_features']
def __init__( self : Union[str, Any] ,lowerCAmelCase__ : List[str]=80 ,lowerCAmelCase__ : Dict=1_60_00 ,lowerCAmelCase__ : Any=1_60 ,lowerCAmelCase__ : Union[str, Any]=30 ,lowerCAmelCase__ : Optional[int]=4_00 ,lowerCAmelCase__ : List[str]=0.0 ,lowerCAmelCase__ : int=False ,**lowerCAmelCase__ : Union[str, Any] ,) -> Any:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase__ ,sampling_rate=lowerCAmelCase__ ,padding_value=lowerCAmelCase__ ,return_attention_mask=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
lowerCAmelCase_ : List[str] = n_fft
lowerCAmelCase_ : Union[str, Any] = hop_length
lowerCAmelCase_ : int = chunk_length
lowerCAmelCase_ : List[Any] = chunk_length * sampling_rate
lowerCAmelCase_ : Dict = self.n_samples // hop_length
lowerCAmelCase_ : List[Any] = sampling_rate
lowerCAmelCase_ : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=lowerCAmelCase__ ,min_frequency=0.0 ,max_frequency=8_000.0 ,sampling_rate=lowerCAmelCase__ ,norm="slaney" ,mel_scale="slaney" ,)
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : np.array ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase_ : str = spectrogram(
lowerCAmelCase__ ,window_function(self.n_fft ,"hann" ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel="log10" ,)
lowerCAmelCase_ : Dict = log_spec[:, :-1]
lowerCAmelCase_ : Optional[Any] = np.maximum(lowerCAmelCase__ ,log_spec.max() - 8.0 )
lowerCAmelCase_ : Optional[int] = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase_ ( lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : List[np.ndarray] ,lowerCAmelCase__ : float = 0.0 ) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
lowerCAmelCase_ : int = np.array(lowerCAmelCase__ ,np.intaa )
lowerCAmelCase_ : Any = []
for vector, length in zip(lowerCAmelCase__ ,attention_mask.sum(-1 ) ):
lowerCAmelCase_ : str = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCAmelCase_ : Tuple = padding_value
normed_input_values.append(lowerCAmelCase__ )
else:
lowerCAmelCase_ : Tuple = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : Optional[int] ,lowerCAmelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCAmelCase__ : bool = True ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[Union[str, TensorType]] = None ,lowerCAmelCase__ : Optional[bool] = None ,lowerCAmelCase__ : Optional[str] = "max_length" ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[int] = None ,lowerCAmelCase__ : Optional[bool] = None ,**lowerCAmelCase__ : Union[str, Any] ,) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCAmelCase_ : Any = isinstance(lowerCAmelCase__ ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase_ : List[Any] = is_batched_numpy or (
isinstance(lowerCAmelCase__ ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase_ : Union[str, Any] = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase__ ,np.ndarray ):
lowerCAmelCase_ : List[str] = np.asarray(lowerCAmelCase__ ,dtype=np.floataa )
elif isinstance(lowerCAmelCase__ ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase_ : Any = [np.asarray([raw_speech] ).T]
lowerCAmelCase_ : int = BatchFeature({"input_features": raw_speech} )
# convert into correct format for padding
lowerCAmelCase_ : Tuple = self.pad(
lowerCAmelCase__ ,padding=lowerCAmelCase__ ,max_length=max_length if max_length else self.n_samples ,truncation=lowerCAmelCase__ ,pad_to_multiple_of=lowerCAmelCase__ ,return_attention_mask=return_attention_mask or do_normalize ,)
# zero-mean and unit-variance normalization
if do_normalize:
lowerCAmelCase_ : Optional[int] = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] ,attention_mask=padded_inputs["attention_mask"] ,padding_value=self.padding_value ,)
lowerCAmelCase_ : Tuple = np.stack(padded_inputs["input_features"] ,axis=0 )
# make sure list is in array format
lowerCAmelCase_ : int = padded_inputs.get("input_features" ).transpose(2 ,0 ,1 )
lowerCAmelCase_ : str = [self._np_extract_fbank_features(lowerCAmelCase__ ) for waveform in input_features[0]]
if isinstance(input_features[0] ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = [np.asarray(lowerCAmelCase__ ,dtype=np.floataa ) for feature in input_features]
else:
lowerCAmelCase_ : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCAmelCase_ : str = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
lowerCAmelCase_ : Any = padded_inputs.convert_to_tensors(lowerCAmelCase__ )
return padded_inputs
def UpperCAmelCase_ ( self : List[Any] ) -> Dict[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = copy.deepcopy(self.__dict__ )
lowerCAmelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 683 |
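The extractor above computes Whisper-style log-mel features. Its core transform can be reproduced with the same transformers.audio_utils helpers it imports; a sketch on a synthetic 440 Hz tone, with parameter values mirroring the defaults in the sample:

import numpy as np
from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

sampling_rate, n_fft, hop_length = 16_000, 400, 160
mel_filters = mel_filter_bank(
    num_frequency_bins=1 + n_fft // 2,
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8_000.0,
    sampling_rate=sampling_rate,
    norm="slaney",
    mel_scale="slaney",
)
t = np.arange(sampling_rate) / sampling_rate  # one second of audio
waveform = np.sin(2 * np.pi * 440.0 * t).astype(np.float32)
log_spec = spectrogram(
    waveform,
    window_function(n_fft, "hann"),
    frame_length=n_fft,
    hop_length=hop_length,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
print(log_spec.shape)  # (80, num_frames)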
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
_lowercase = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
_lowercase = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def UpperCamelCase ( ):
lowerCAmelCase_ : str = (
list(range(ord("!") , ord("~") + 1)) + list(range(ord("¡") , ord("¬") + 1)) + list(range(ord("®") , ord("ÿ") + 1))
)
lowerCAmelCase_ : Tuple = bs[:]
lowerCAmelCase_ : Dict = 0
for b in range(2**8):
if b not in bs:
bs.append(snake_case__)
cs.append(2**8 + n)
n += 1
lowerCAmelCase_ : Union[str, Any] = [chr(snake_case__) for n in cs]
return dict(zip(snake_case__ , snake_case__))
def UpperCamelCase ( snake_case__):
lowerCAmelCase_ : Optional[Any] = set()
lowerCAmelCase_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
lowerCAmelCase_ : Union[str, Any] = char
return pairs
class __snake_case ( snake_case__ ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['input_ids', 'attention_mask']
def __init__( self : str ,lowerCAmelCase__ : Dict ,lowerCAmelCase__ : List[Any] ,lowerCAmelCase__ : Optional[Any]="replace" ,lowerCAmelCase__ : Dict="<s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : str="</s>" ,lowerCAmelCase__ : Optional[Any]="<s>" ,lowerCAmelCase__ : List[Any]="<unk>" ,lowerCAmelCase__ : Union[str, Any]="<pad>" ,lowerCAmelCase__ : int="<mask>" ,lowerCAmelCase__ : Any=False ,**lowerCAmelCase__ : int ,) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else bos_token
lowerCAmelCase_ : Tuple = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else eos_token
lowerCAmelCase_ : Dict = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else sep_token
lowerCAmelCase_ : int = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else cls_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else unk_token
lowerCAmelCase_ : List[str] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase__ ,lstrip=lowerCAmelCase__ ,rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ ,bos_token=lowerCAmelCase__ ,eos_token=lowerCAmelCase__ ,unk_token=lowerCAmelCase__ ,sep_token=lowerCAmelCase__ ,cls_token=lowerCAmelCase__ ,pad_token=lowerCAmelCase__ ,mask_token=lowerCAmelCase__ ,add_prefix_space=lowerCAmelCase__ ,**lowerCAmelCase__ ,)
with open(lowerCAmelCase__ ,encoding="utf-8" ) as vocab_handle:
lowerCAmelCase_ : List[Any] = json.load(lowerCAmelCase__ )
lowerCAmelCase_ : Dict = {v: k for k, v in self.encoder.items()}
lowerCAmelCase_ : List[Any] = errors # how to handle errors in decoding
lowerCAmelCase_ : Optional[Any] = bytes_to_unicode()
lowerCAmelCase_ : int = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ ,encoding="utf-8" ) as merges_handle:
lowerCAmelCase_ : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
lowerCAmelCase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase_ : Dict = dict(zip(lowerCAmelCase__ ,range(len(lowerCAmelCase__ ) ) ) )
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase_ : Optional[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase_ : Union[str, Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Optional[Any] = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
lowerCAmelCase_ : Dict = min(lowerCAmelCase__ ,key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ ,float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase_ , lowerCAmelCase_ : Dict = bigram
lowerCAmelCase_ : Optional[Any] = []
lowerCAmelCase_ : Any = 0
while i < len(lowerCAmelCase__ ):
try:
lowerCAmelCase_ : Optional[int] = word.index(lowerCAmelCase__ ,lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase_ : Tuple = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase_ : Optional[Any] = tuple(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
lowerCAmelCase_ : Dict = get_pairs(lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = " ".join(lowerCAmelCase__ )
lowerCAmelCase_ : Any = word
return word
def UpperCAmelCase_ ( self : Optional[int] ,lowerCAmelCase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
for token in re.findall(self.pat ,lowerCAmelCase__ ):
lowerCAmelCase_ : List[str] = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(" " ) )
return bpe_tokens
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : int ) -> Tuple:
'''simple docstring'''
return self.encoder.get(lowerCAmelCase__ ,self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : List[str] ,lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return self.decoder.get(lowerCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = "".join(lowerCAmelCase__ )
lowerCAmelCase_ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" ,errors=self.errors )
return text
def UpperCAmelCase_ ( self : Optional[Any] ,lowerCAmelCase__ : str ,lowerCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ : Optional[Any] = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ : Tuple = os.path.join(
lowerCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowerCAmelCase__ ,ensure_ascii=lowerCAmelCase__ ) + "\n" )
lowerCAmelCase_ : Tuple = 0
with open(lowerCAmelCase__ ,"w" ,encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowerCAmelCase_ : Optional[Any] = token_index
writer.write(" ".join(lowerCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase_ ( self : Dict ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ,lowerCAmelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ ,token_ids_a=lowerCAmelCase__ ,already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCAmelCase_ ( self : Any ,lowerCAmelCase__ : List[int] ,lowerCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = [self.sep_token_id]
lowerCAmelCase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : List[str] ,lowerCAmelCase__ : Optional[int]=False ,**lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = kwargs.pop("add_prefix_space" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
lowerCAmelCase_ : Union[str, Any] = " " + text
return (text, kwargs)
| 683 | 1 |
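Two details of the tokenizer above are worth a concrete look: byte-level BPE maps every byte to a printable stand-in, and sentence pairs are laid out RoBERTa-style with a doubled separator, <s> A </s></s> B </s>. A sketch assuming the upstream LongformerTokenizer class name and an already-downloaded checkpoint:

from transformers import LongformerTokenizer

tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
ids = tok.build_inputs_with_special_tokens(
    tok.convert_tokens_to_ids(tok.tokenize("Hello")),
    tok.convert_tokens_to_ids(tok.tokenize("world")),
)
print(tok.convert_ids_to_tokens(ids))  # e.g. ['<s>', 'Hello', '</s>', '</s>', 'world', '</s>']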