| code (string, 86–54.5k chars) | code_codestyle (int64, 0–371) | style_context (string, 87–49.2k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|

Each dataset row below is flattened: the full `code` cell, a `| code_codestyle: N |` marker, the full `style_context` cell, then a `| style_context_codestyle: N | label: N |` marker.
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)
    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)
    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)
    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)
    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)
    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
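# Minimal sketch of how this test file is typically invoked (assuming it lives at
# examples/flax/test_flax_examples.py in a transformers checkout; the path is an
# assumption, not confirmed by this sample):
#
#   RUN_SLOW=1 python -m pytest -v examples/flax/test_flax_examples.py -k test_run_glue
#
# RUN_SLOW=1 is needed because most of the methods above are decorated with @slow.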
| code_codestyle: 103 |
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
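# Editor's note (sketch, not part of the original sample): the contour drawn at
# probs == 0.5 is the decision boundary. Since sigmoid_function(0) == 0.5, that
# boundary is exactly the set of points where np.dot(x, theta) == 0; with two
# features and no bias term it is a line through the origin of feature space.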
| style_context_codestyle: 139 | label: 0 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_token_type_ids:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| code_codestyle: 356 |
import numpy as np


def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implement the tanh function, (2 / (1 + e^(-2x))) - 1, element-wise."""
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
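# Editor's sketch (not part of the original sample): the expression above is
# algebraically identical to np.tanh, which gives a quick sanity check:
#
#   v = np.array([1.0, 5.0, 6.0, -0.67])
#   assert np.allclose(tangent_hyperbolic(v), np.tanh(v))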
| style_context_codestyle: 177 | label: 0 |
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
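# A short usage sketch (editor's addition; outputs follow directly from the logic above):
#
#   check_anagrams("Silent night", "listen thing")  # -> True
#   check_anagrams("rat", "car")                    # -> False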
| code_codestyle: 192 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| style_context_codestyle: 276 | label: 0 |
def topological_sort(graph):
    """Perform a topological sort (Kahn's algorithm) on a directed graph."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
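# Editor's sketch (not in the original sample): a cyclic graph exercises the
# other branch, since vertices on the cycle never reach indegree 0:
#
#   topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"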
| code_codestyle: 83 |
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """Return num! computed recursively, memoized via lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
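# Usage sketch (editor's addition): factorial(5) == 120. Thanks to @lru_cache,
# a later call such as factorial(5) after factorial(10) is answered from the
# cache rather than recomputed.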
| style_context_codestyle: 83 | label: 1 |
import argparse
import os
import shutil

import torch

from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer


def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    args = parser.parse_args()

    main(args)
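# Example invocation (editor's sketch; the script filename and paths are
# hypothetical, the flags are the ones defined above):
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model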
| code_codestyle: 211 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep=None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
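# Minimal usage sketch (editor's addition; it only relies on the API as
# reconstructed above, so treat the call signatures as assumptions):
#
#   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#   state = scheduler.create_state()
#   state = scheduler.set_timesteps(state, num_inference_steps=50)
#   # inside the denoising loop:
#   # out = scheduler.step(state, model_output, t, sample, key=rng)
#   # sample, state = out.prev_sample, out.state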
| style_context_codestyle: 211 | label: 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| code_codestyle: 364 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class _lowercase ( __a ):
"""simple docstring"""
lowercase__ = 42
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
| style_context_codestyle: 85 | label: 0 |
import argparse

CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
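# Example invocation (editor's sketch; the script filename is hypothetical):
#
#   python update_custom_js.py --version 4.30.0
#
# This rewrites docs/source/_static/js/custom.js so that `stableVersion` points
# at v4.30.0 and the version mapping gains a "v4.30.0" entry.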
| code_codestyle: 55 |
from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        """Get mel-filter bank features using TorchAudio, which expects 16-bit signed integer input."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
@staticmethod
def snake_case ( UpperCamelCase , UpperCamelCase , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = 0.0 , ):
"""simple docstring"""
# make sure we normalize float32 arrays
if normalize_means:
lowerCamelCase_ = x[:input_length].mean(axis=0 )
lowerCamelCase_ = np.subtract(UpperCamelCase , UpperCamelCase )
if normalize_vars:
lowerCamelCase_ = x[:input_length].std(axis=0 )
lowerCamelCase_ = np.divide(UpperCamelCase , UpperCamelCase )
if input_length < x.shape[0]:
lowerCamelCase_ = padding_value
# make sure array is in float32
lowerCamelCase_ = x.astype(np.floataa )
return x
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCamelCase , UpperCamelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCamelCase , UpperCamelCase )
]
def __call__( self , UpperCamelCase , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
lowerCamelCase_ = isinstance(UpperCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase_ = is_batched_numpy or (
isinstance(UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ = [np.asarray(UpperCamelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCamelCase , np.ndarray ):
lowerCamelCase_ = np.asarray(UpperCamelCase , dtype=np.floataa )
elif isinstance(UpperCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ = [raw_speech]
# extract fbank features
lowerCamelCase_ = [self._extract_fbank_features(UpperCamelCase ) for waveform in raw_speech]
# convert into correct format for padding
lowerCamelCase_ = BatchFeature({"input_features": features} )
lowerCamelCase_ = self.pad(
UpperCamelCase , padding=UpperCamelCase , max_length=UpperCamelCase , truncation=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , **UpperCamelCase , )
# make sure list is in array format
lowerCamelCase_ = padded_inputs.get("input_features" )
if isinstance(input_features[0] , UpperCamelCase ):
lowerCamelCase_ = [np.asarray(UpperCamelCase , dtype=np.floataa ) for feature in input_features]
lowerCamelCase_ = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowerCamelCase_ = [np.asarray(UpperCamelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowerCamelCase_ = (
np.array(UpperCamelCase , dtype=np.intaa )
if self._get_padding_strategies(UpperCamelCase , max_length=UpperCamelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase_ = self.normalize(
padded_inputs["input_features"] , attention_mask=UpperCamelCase )
if return_tensors is not None:
lowerCamelCase_ = padded_inputs.convert_to_tensors(UpperCamelCase )
return padded_inputs
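
if __name__ == "__main__":
    # A minimal usage sketch: the waveform below is random noise, purely illustrative,
    # and assumes torchaudio is installed.
    extractor = Speech2TextFeatureExtractor()
    speech = np.random.randn(16000).astype(np.float32)  # one second of fake 16 kHz audio
    batch = extractor(speech, sampling_rate=16000, padding=True, return_tensors="np")
    print(batch["input_features"].shape)  # (1, num_frames, 80)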
| 55 | 1 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 139 |
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            # split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 139 | 1 |
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # private field holding the default key
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40 |
'''simple docstring'''
def different_signs(num1: int, num2: int) -> bool:
    """Return True if the two integers have opposite signs.

    XOR-ing a negative and a non-negative integer yields a negative number,
    because their sign bits differ.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    """
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
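
# Equivalent spelled-out check for comparison (a sketch, not part of the original):
def different_signs_naive(num1: int, num2: int) -> bool:
    return (num1 < 0) != (num2 < 0)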
| 239 | 0 |
"""simple docstring"""
from __future__ import annotations
def ceil_index(v, left, right, key):  # noqa: E741
    """Smallest index in v[left+1 .. right] whose value is >= key (binary search)."""
    while right - left > 1:
        middle = (left + right) // 2
        if v[middle] >= key:
            right = middle
        else:
            left = middle
    return right


def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence, in O(n log n).

    >>> longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
    6
    >>> longest_increasing_subsequence_length([])
    0
    """
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value starts a fresh candidate subsequence
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 40 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :Tuple = ["ConvNextFeatureExtractor"]
a :str = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 132 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Upper-case the input, strip non-letters and separate repeated letters with X's."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
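
# Round-trip demo (the key and message are arbitrary examples):
if __name__ == "__main__":
    msg = encode("Hide the gold in the tree stump", "playfair example")
    print(msg)
    print(decode(msg, "playfair example"))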
| 132 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return all left and right truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes (Project Euler 37)."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
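
# Worked example: 3797 stays prime under every truncation
# (3797 -> 797 -> 97 -> 7 and 3797 -> 379 -> 37 -> 3 are all prime), which is
# exactly what `all(is_prime(i) for i in list_truncated_nums(3797))` checks.
# The first four truncatable primes are 23, 37, 53 and 73:
# print(compute_truncated_primes(4))  # [23, 37, 53, 73]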
| 74 |
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map the string representation of each choice back to the choice itself."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """Subclass of `argparse.ArgumentParser` that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
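
# A minimal usage sketch (the dataclass below is hypothetical; it just shows the
# CLI the parser generates, including the boolean flag handling from above):
if __name__ == "__main__":

    @dataclasses.dataclass
    class TrainingConfig:
        learning_rate: float = 3e-4
        epochs: int = 1
        use_fp16: bool = False

    parser = HfArgumentParser(TrainingConfig)
    (config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--use_fp16"])
    print(config)  # TrainingConfig(learning_rate=0.001, epochs=1, use_fp16=True)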
| 74 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n by n spiral grid (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 325 |
'''simple docstring'''
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1].

    >>> find_max([3, 2, 1], 0, 2)
    3
    """
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
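
# A quick cross-check against the builtin max (random data, purely illustrative):
if __name__ == "__main__":
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert find_max(data, 0, len(data) - 1) == max(data)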
| 267 | 0 |
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of a child's prefix to the child node
        self.nodes: dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node prefix and a word.

        Returns (common substring, remaining prefix, remaining word).
        """
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: The word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, "  (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
| 352 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int) -> np.ndarray:
    """Build a ksize x ksize Gabor kernel for the given orientation and wavelength."""
    # prepare kernel
    # the kernel size have to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)

    waitKey(0)
| 10 | 0 |
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline using a model that maps an (image, question) pair to answer scores."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports dicts of {"image": ..., "question": ...}, lists of such dicts,
            # generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 148 |
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
if num < 0:
raise ValueError("Number should not be negative." )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
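
if __name__ == "__main__":  # memoization demo: the second call is a pure cache hit
    factorial(10)
    factorial(10)
    print(factorial.cache_info())  # CacheInfo(hits=1, misses=10, maxsize=128, currsize=10)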
| 148 | 1 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : Tuple = logging.get_logger(__name__)
lowercase__ : Dict = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class for the Conditional DETR model."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
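
# Quick sketch: instantiate the config and round-trip it through to_dict()
# (the values checked are the defaults defined above):
if __name__ == "__main__":
    config = ConditionalDetrConfig()
    d = config.to_dict()
    assert d["model_type"] == "conditional_detr"
    assert config.hidden_size == config.d_model == 256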
| 287 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 287 | 1 |
__A : str = "Tobias Carryer"
from time import time
class A_ :
def __init__( self , _A , _A , _A , _A=int(time() ) ): # noqa: B008
'''simple docstring'''
UpperCAmelCase = multiplier
UpperCAmelCase = increment
UpperCAmelCase = modulo
UpperCAmelCase = seed
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__A : Optional[int] = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
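
# With a fixed seed the stream is fully reproducible (a quick sketch, commented
# out because the __main__ loop above runs forever):
# lcg_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
# lcg_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
# assert [lcg_a.next_number() for _ in range(5)] == [lcg_b.next_number() for _ in range(5)]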
| 273 |
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 273 | 1 |
UpperCAmelCase = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 365 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
snake_case_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCamelCase__ ( self ):
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , output_type='''np''' , )
snake_case_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase__ ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
snake_case_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
snake_case_ = '''stabilityai/stable-diffusion-2-inpainting'''
snake_case_ = PNDMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , scheduler=_UpperCAmelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case_ = '''Face of a yellow cat, high resolution, sitting on a park bench'''
snake_case_ = torch.manual_seed(0 )
snake_case_ = pipe(
prompt=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type='''np''' , )
snake_case_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
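# Note on the assertion above: torch.cuda.max_memory_allocated() reports the
# peak since the last reset_peak_memory_stats() call, which is why the test
# clears the counters before building the pipeline. A minimal standalone
# sketch of the same measurement pattern (run_workload is hypothetical):
#
#   torch.cuda.reset_peak_memory_stats()
#   run_workload()
#   peak_bytes = torch.cuda.max_memory_allocated()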
| 267 | 0 |
"""Return the complementary strand of a DNA sequence."""
import re


def dna(dna: str) -> str:
    """
    >>> dna("GCTA")
    'CGAT'
    >>> dna("ATGC")
    'TACG'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
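# Quick property check (illustrative): complementing twice is the identity.
#
#   assert dna(dna("ATCGATCG")) == "ATCGATCG"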
| 42 |
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text_to_image(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 42 | 1 |
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of a list, sorted ascending, or [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
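# Since Python 3.8, statistics.multimode offers the same multi-modal behaviour
# (unsorted, in first-seen order); a rough cross-check:
#
#   from statistics import multimode
#   assert mode([2, 2, 3, 3, 1]) == sorted(multimode([2, 2, 3, 3, 1]))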
| 370 |
import json
import os
import tempfile

import datasets

from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
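# The @get_duration decorator (from the local utils module) is assumed to time
# a call and return the elapsed seconds; a minimal stand-in for running this
# file outside the benchmarks folder could look like:
#
#   import functools
#   import time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.time()
#           func(*args, **kwargs)
#           return time.time() - start
#       return wrapper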
| 194 | 0 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for the given number of bits, as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Build the Gray code sequence recursively, as binary strings."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
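# A well-known closed form yields the same (reflected binary) sequence without
# recursion: the i-th Gray code is i ^ (i >> 1). Illustrative cross-check:
#
#   assert gray_code(3) == [i ^ (i >> 1) for i in range(1 << 3)]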
| 212 |
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs)
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs)

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
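# Typical round trip (hedged sketch; the checkpoint name and inputs are
# illustrative):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="What does the chart show?", return_tensors="pt")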
| 39 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '''\
@inproceedings{popovic-2015-chrf,
title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",
month = sep,
year = "2015",
address = "Lisbon, Portugal",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W15-3049",
doi = "10.18653/v1/W15-3049",
pages = "392--395",
}
@inproceedings{popovic-2017-chrf,
title = "chr{F}++: words helping character n-grams",
author = "Popovi{\'c}, Maja",
booktitle = "Proceedings of the Second Conference on Machine Translation",
month = sep,
year = "2017",
address = "Copenhagen, Denmark",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W17-4770",
doi = "10.18653/v1/W17-4770",
pages = "612--618",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
_DESCRIPTION = '''\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
'''
_KWARGS_DESCRIPTION = '''
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
\'score\' (float): The chrF (chrF++) score,
\'char_order\' (int): The character n-gram order,
\'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
\'beta\' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]
>>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]
>>> chrf = datasets.load_metric("chrf")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse("""1.4.12""" ):
raise ImportWarning(
"""To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"""
"""You can install it with `pip install \"sacrebleu>=1.4.12\"`.""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[
"""https://github.com/m-popovic/chrF""",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
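# The same scores can be computed directly with sacrebleu, bypassing the
# datasets wrapper (hedged sketch):
#
#   from sacrebleu import CHRF
#   chrf = CHRF(word_order=2)  # chrF++ once word n-grams are enabled
#   print(chrf.corpus_score(["the cat sat on the mat"], [["the cat sat on a mat"]]))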
| 276 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: +/-1 for square-free n (by parity of its prime factors), else 0."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
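# First values for n = 1..10 (sanity check, assuming maths.prime_factors
# returns factors with multiplicity):
#
#   assert [mobius(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]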
| 276 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 34 |
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class snake_case ( ctypes.Structure ):
'''simple docstring'''
A_ : List[str] = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def lowerCAmelCase ( ):
"""simple docstring"""
if os.name == "nt":
__A = CursorInfo()
__A = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
__A = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def lowerCAmelCase ( ):
"""simple docstring"""
if os.name == "nt":
__A = CursorInfo()
__A = ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
__A = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__UpperCamelCase , ctypes.byref(__UpperCamelCase ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def lowerCAmelCase ( ):
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
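# Intended usage (hedged sketch): wrap terminal-drawing code so the cursor is
# restored even if the wrapped block raises.
#
#   with hide():
#       render_menu()  # hypothetical drawing function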
| 266 | 0 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a Python list."""

    def __init__(self, n: int) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
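# Illustrative usage:
#
#   q = CircularQueue(3)
#   q.enqueue(1).enqueue(2)  # enqueue returns self, so calls can be chained
#   assert len(q) == 2 and q.first() == 1
#   assert q.dequeue() == 1 and q.dequeue() == 2 and q.is_empty()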
| 365 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128, metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=True, metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        }, )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    language: str = field(
        default=None, metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
    train_language: Optional[str] = field(
        default=None, metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
    config_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''}, )
    model_revision: str = field(
        default='''main''', metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''}, )
    use_auth_token: bool = field(
        default=False, metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        }, )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''}, )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_xnli''' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                '''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                '''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['''label'''].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['''label'''].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            '''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['''label'''].names

    # Labels
    num_labels = len(label_list )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = '''max_length'''
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples ):
        # Tokenize the texts
        return tokenizer(
            examples['''premise'''] , examples['''hypothesis'''] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
    # Get the metric function
    metric = evaluate.load('''xnli''' )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['''train_samples'''] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('''train''' , metrics )
        trainer.save_metrics('''train''' , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['''eval_samples'''] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('''eval''' , metrics )
        trainer.save_metrics('''eval''' , metrics )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
        predictions, labels, metrics = trainer.predict(predict_dataset , metric_key_prefix='''predict''' )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['''predict_samples'''] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics('''predict''' , metrics )
        trainer.save_metrics('''predict''' , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , '''predictions.txt''' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , '''w''' ) as writer:
                writer.write('''index\tprediction\n''' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f'''{index}\t{item}\n''' )
if __name__ == "__main__":
main()
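# Example invocation (hedged; flag names mirror the dataclasses above):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval --output_dir /tmp/debug_xnli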
| 279 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
    mask_ratio: float = field(
        default=0.6, metadata={'help': 'Percentage of patches to mask.'} , )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)} , )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    config_overrides: Optional[str] = field(
        default=None, metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    image_size: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    patch_size: Optional[int] = field(
        default=None, metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None, metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator:
    """Generates a random boolean patch mask for SimMIM-style pretraining."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
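# Shape check for the defaults above (illustrative): a 192px input with 32px
# mask patches and 4px model patches yields a flat mask of (192 // 4) ** 2
# entries, roughly 60% of them set.
#
#   mask = MaskGenerator()()
#   assert mask.numel() == (192 // 4) ** 2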
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split )
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.' )
        if model_args.config_overrides is not None:
            logger.info(F"Overriding config: {model_args.config_overrides}" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"New config: {config}" )

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , 'decoder_type' ):
        config.decoder_type = 'simmim'

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
'image_size': model_args.image_size,
'patch_size': model_args.patch_size,
'encoder_stride': model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('Training new model from scratch' )
        model = AutoModelForMaskedImageModeling.from_config(config )

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )
# create mask generator
__lowerCAmelCase : List[str] = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(_UpperCamelCase ):
__lowerCAmelCase : List[Any] = [transforms(_UpperCamelCase ) for image in examples[image_column_name]]
__lowerCAmelCase : int = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'masked-image-modeling',
        'dataset': data_args.dataset_name,
        'tags': ['masked-image-modeling'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
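# For intuition, a minimal sketch of what a SimMIM-style mask generator (the
# MaskGenerator used in main() above, defined earlier in this file) produces:
# it marks mask_ratio of the coarse mask_patch_size blocks at random, then
# upsamples the boolean grid to the model's patch resolution. This is an
# illustrative re-implementation, not the class used above.
import numpy as np

def sketch_simmim_mask(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
    rand_size = input_size // mask_patch_size      # e.g. a 6x6 grid of maskable blocks
    scale = mask_patch_size // model_patch_size    # each block spans 8x8 model patches
    token_count = rand_size**2
    mask_count = int(np.ceil(token_count * mask_ratio))
    mask = np.zeros(token_count, dtype=int)
    mask[np.random.permutation(token_count)[:mask_count]] = 1
    mask = mask.reshape((rand_size, rand_size))
    return mask.repeat(scale, axis=0).repeat(scale, axis=1)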
| 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
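# Why the indirection above: importing MLukeTokenizer eagerly would pull in
# sentencepiece even for users who never touch it. Below is a minimal sketch of
# the deferral idea (illustrative only; the real _LazyModule in
# transformers.utils also validates the import structure and supports pickling):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each public class to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):  # only called when normal attribute lookup fails
        module = importlib.import_module('.' + self._class_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value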
| 286 | 0 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost to travel on every day in ``days``, given the
    prices of 1-day, 7-day and 30-day tickets in ``costs``.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
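    # Worked example (the classic instance of this problem; values are not from
    # the original file): a 1-day ticket on day 1, a 7-day ticket covering days
    # 4-10 and a 1-day ticket on day 20 is optimal, for a total of 11.
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11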
| 187 |
'''simple docstring'''
from __future__ import annotations
class XORCipher:
    """Symmetric XOR cipher: the same key is used to encrypt and to decrypt."""

    def __init__(self, key: int = 0) -> None:
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 187 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
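# Example invocation (the generator/question-encoder checkpoints below are the
# ones RAG was originally assembled from; treat the exact ids and the script
# filename as illustrative):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated-checkpoint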
| 203 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xmod-base""": """https://huggingface.co/facebook/xmod-base/resolve/main/config.json""",
"""facebook/xmod-large-prenorm""": """https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json""",
"""facebook/xmod-base-13-125k""": """https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-125k""": """https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json""",
"""facebook/xmod-base-30-195k""": """https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json""",
"""facebook/xmod-base-60-125k""": """https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json""",
"""facebook/xmod-base-60-265k""": """https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json""",
"""facebook/xmod-base-75-125k""": """https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json""",
"""facebook/xmod-base-75-269k""": """https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json""",
}
class XmodConfig(PretrainedConfig):
    model_type = 'xmod'

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=('en_XX',),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
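# Usage sketch (illustrative): every entry in `languages` gets its own
# bottleneck adapter of width hidden_size // adapter_reduction_factor, so the
# defaults above give one 384-dim adapter per language.
_default_config = XmodConfig()
assert _default_config.hidden_size // _default_config.adapter_reduction_factor == 384
assert _default_config.languages == ['en_XX']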
| 203 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token='</s>',
        unk_token='<unk>',
        pad_token='<pad>',
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens'
                )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f' {pretrained_model_name_or_path} automatically truncating your input to'
                    f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
                    f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.',
                    FutureWarning,
                )
        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f'Copy vocab file to {out_vocab_file}')
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r'<extra_id_\d+>', x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
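# Context for get_sentinel_tokens/get_sentinel_token_ids above: the extra_ids
# added in __init__ materialise as sentinel tokens <extra_id_0> ... <extra_id_99>
# that T5 uses to mark corrupted spans during span-denoising pretraining.
# Illustrative usage (requires downloading a checkpoint, hence commented out):
#
#   tok = TaTokenizerFast.from_pretrained('t5-small')
#   assert '<extra_id_0>' in tok.get_sentinel_tokens()   # set-backed, so unordered
#   assert len(tok.get_sentinel_token_ids()) == 100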
| 363 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88 | 0 |
import numpy as np
class Cell:
    """One grid cell: its position, parent link and A* bookkeeping values."""

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neigbours(self, cell):
        """Return the up-to-eight neighbouring cells that lie inside the grid."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """A* search from start to goal; returns the path as a list of positions."""
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            if any(c == n for c in _closed):
                # already expanded, skip
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            if any(c == n and c.f < n.f for c in _open):
                # a better candidate for this cell is already queued
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
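# A note on the heuristic above: astar scores h as the *squared* Euclidean
# distance while g grows by 1 per step, so h can overestimate the remaining
# cost and the result is not guaranteed to be a shortest path. On an
# 8-connected grid with unit step cost, Chebyshev distance is an admissible
# drop-in alternative:
def chebyshev_heuristic(position, goal_position):
    dx = abs(position[0] - goal_position[0])
    dy = abs(position[1] - goal_position[1])
    return max(dx, dy)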
| 302 |
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    """Drop top-level keys that have no counterpart in the HF model."""
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    """Rename fairseq-style keys in place according to WHISPER_MAPPING."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    """Build an output projection whose weights copy the embedding matrix."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = '.') -> bytes:
    # root defaults to the working directory so the single-argument call below works
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f'{download_target} exists and is not a regular file')
    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file')
    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1_024
        ) as loop:
            while True:
                buffer = source.read(8_192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.'
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if '.pt' not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['n_mels'],
        d_model=dimensions['n_audio_state'],
        max_target_positions=dimensions['n_text_ctx'],
        encoder_layers=dimensions['n_audio_layer'],
        encoder_attention_heads=dimensions['n_audio_head'],
        decoder_layers=dimensions['n_text_layer'],
        decoder_attention_heads=dimensions['n_text_head'],
        max_source_positions=dimensions['n_audio_ctx'],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        'encoder.embed_positions.weights',
        'decoder.embed_positions.weights',
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f' but all the following weights are missing {missing}'
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to the downloaded checkpoints')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
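# Note on make_linear_from_emb above: it rebuilds the output projection by
# copying the decoder embedding matrix, reproducing OpenAI's tied
# embedding/head weights at conversion time. For contrast, a sketch of a head
# that shares the Parameter itself, so the two stay tied under further
# training (illustrative, not part of the original script; reuses the `nn`
# import from the top of this file):
def make_tied_head_sketch(emb):
    vocab_size, d_model = emb.weight.shape
    head = nn.Linear(d_model, vocab_size, bias=False)
    head.weight = emb.weight  # share the Parameter, not just its data
    return head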
| 279 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
'''simple docstring'''
_A = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
_A = tokenizer('\n' , add_special_tokens=__UpperCAmelCase ).input_ids[0]
_A = get_blipa_config(__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
_A = BlipaForConditionalGeneration(__UpperCAmelCase ).eval()
_A = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
_A = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
_A = '''cuda''' if torch.cuda.is_available() else '''cpu'''
_A = load_model_and_preprocess(
name=__UpperCAmelCase , model_type=__UpperCAmelCase , is_eval=__UpperCAmelCase , device=__UpperCAmelCase )
original_model.eval()
print('Done!' )
# update state dict keys
_A = original_model.state_dict()
_A = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_A = state_dict.pop(__UpperCAmelCase )
if key.startswith('Qformer.bert' ):
_A = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
_A = key.replace('self' , 'attention' )
if "opt_proj" in key:
_A = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
_A = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
_A = key.replace('opt' , 'language' )
if key.startswith('t5' ):
_A = key.replace('t5' , 'language' )
_A = val
# read in qv biases
read_in_q_v_bias(__UpperCAmelCase , __UpperCAmelCase )
_A = hf_model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_A = load_demo_image()
_A = vis_processors['''eval'''](__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
_A = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__UpperCAmelCase )
# create processor
_A = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
_A = BlipaProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
_A = processor(images=__UpperCAmelCase , return_tensors='pt' ).pixel_values.to(__UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase )
original_model.to(__UpperCAmelCase )
hf_model.to(__UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
_A = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
_A = hf_model(__UpperCAmelCase , __UpperCAmelCase ).logits
else:
_A = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
_A = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_A = hf_model(__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_A = torch.tensor(
[[-41.58_50, -4.4440, -8.9922], [-47.43_22, -5.9143, -1.7340]] , device=__UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_A = torch.tensor(
[[-57.01_09, -9.8967, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=__UpperCAmelCase )
else:
# cast to same type
_A = logits.dtype
assert torch.allclose(original_logits.to(__UpperCAmelCase ) , __UpperCAmelCase , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
_A = ''''''
_A = tokenizer(__UpperCAmelCase , return_tensors='pt' ).input_ids.to(__UpperCAmelCase )
_A = original_model.generate({'image': original_pixel_values} )
_A = hf_model.generate(
__UpperCAmelCase , __UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , __UpperCAmelCase )
_A = input_ids.shape[1]
_A = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__UpperCAmelCase )
_A = [text.strip() for text in output_text]
print('HF generation:' , __UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
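# Background for read_in_q_v_bias above: the LAVIS ViT stores a fused qkv
# projection whose key bias is fixed at zero, so the checkpoint only carries
# q and v biases and the k slot must be padded with zeros. Minimal
# illustration (the hidden size 768 is hypothetical):
def _qkv_bias_packing_demo():
    q_bias = torch.randn(768)
    v_bias = torch.randn(768)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias), v_bias))
    assert qkv_bias.shape == (3 * 768,)
    return qkv_bias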
| 352 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs')
@classmethod
def lowerCAmelCase_ ( cls : List[Any] ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowerCAmelCase_ ( cls : Tuple ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Optional[int] ):
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_UpperCAmelCase ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_UpperCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def lowerCAmelCase_ ( self : Any ):
execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() )
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
def lowerCAmelCase_ ( self : Any ):
_A = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Dict ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=_UpperCAmelCase )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[str] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : str ):
_A = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : int ):
_A = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
def lowerCAmelCase_ ( self : Optional[int] ):
_A = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=_UpperCAmelCase , )
self.assertIn(
F'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , _UpperCAmelCase , )
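# What these tests exercise: `accelerate tpu-config ... --debug` composes the
# gcloud SSH command and prints it instead of executing it, so each assertion
# checks a substring of the rendered command. Roughly (illustrative shape, not
# verbatim tool output):
#
#   Running gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a \
#       --command "cd /usr/share; ls" --worker all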
| 271 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((va - vb) ** 2 for va, vb in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""", number=1_00_00, globals=globals(), ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""", number=1_00_00, globals=globals(), ) )
benchmark()
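    # Quick sanity example (values assumed): the distance from (0, 0, 0) to
    # (1, 2, 2) is sqrt(1 + 4 + 4) = 3 under both implementations.
    assert euclidean_distance((0, 0, 0), (1, 2, 2)) == 3.0
    assert euclidean_distance_no_np((0, 0, 0), (1, 2, 2)) == 3.0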
| 162 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
"""simple docstring"""
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowerCamelCase__: Optional[int] ="lm_head"
lowerCamelCase__: Dict =getattr(__a , __a )
if weight_type is not None:
lowerCamelCase__: str =getattr(__a , __a ).shape
else:
lowerCamelCase__: int =hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase__: Dict =value
elif weight_type == "weight_g":
lowerCamelCase__: Optional[Any] =value
elif weight_type == "weight_v":
lowerCamelCase__: int =value
elif weight_type == "bias":
lowerCamelCase__: List[str] =value
else:
lowerCamelCase__: Union[str, Any] =value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
"""simple docstring"""
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: List[str] =fairseq_model.state_dict()
lowerCamelCase__: Optional[int] =hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__: int =False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__: str =True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase__: List[str] ="unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__: Optional[Any] =True
if "*" in mapped_key:
lowerCamelCase__: Optional[Any] =name.split(__a )[0].split("." )[-2]
lowerCamelCase__: List[str] =mapped_key.replace("*" , __a )
if "weight_g" in name:
lowerCamelCase__: List[str] ="weight_g"
elif "weight_v" in name:
lowerCamelCase__: Union[str, Any] ="weight_v"
elif "bias" in name:
lowerCamelCase__: Dict ="bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__: Tuple ="weight"
else:
lowerCamelCase__: List[Any] =None
set_recursively(__a , __a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
"""simple docstring"""
lowerCamelCase__: Tuple =full_name.split("conv_layers." )[-1]
lowerCamelCase__: List[str] =name.split("." )
lowerCamelCase__: str =int(items[0] )
lowerCamelCase__: Union[str, Any] =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase__: Dict =value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase__: List[Any] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase__: List[str] =value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__a )
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
"""simple docstring"""
if config_path is not None:
lowerCamelCase__: str =UniSpeechConfig.from_pretrained(__a )
else:
lowerCamelCase__: List[Any] =UniSpeechConfig()
if is_finetuned:
if dict_path:
lowerCamelCase__: str =Dictionary.load_from_json(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase__: Any =target_dict.pad_index
lowerCamelCase__: int =target_dict.bos_index
lowerCamelCase__: Any =target_dict.eos_index
lowerCamelCase__: Dict =len(target_dict.symbols )
lowerCamelCase__: Optional[int] =os.path.join(__a , "vocab.json" )
if not os.path.isdir(__a ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
lowerCamelCase__: Optional[Any] =target_dict.indices
# fairseq has the <pad> and <s> switched
lowerCamelCase__: Optional[Any] =42
lowerCamelCase__: List[Any] =43
with open(__a , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__a , __a )
lowerCamelCase__: List[str] =WavaVecaPhonemeCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__a , )
lowerCamelCase__: Dict =True if config.feat_extract_norm == "layer" else False
lowerCamelCase__: Tuple =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
lowerCamelCase__: List[Any] =WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
lowerCamelCase__: int =UniSpeechForCTC(__a )
else:
lowerCamelCase__: int =UniSpeechForPreTraining(__a )
if is_finetuned:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[int] =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Tuple =fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowerCamelCase__: List[str] =model[0].eval()
recursively_load_weights(__a , __a , __a )
hf_unispeech.save_pretrained(__a )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
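# set_recursively above resolves dotted fairseq keys with repeated getattr
# (nn.ModuleList supports getattr with digit strings, so 'layers.0' works). An
# equivalent compact helper, shown as a sketch rather than part of the
# original converter:
import functools

def get_by_dotted_path(root, dotted_name):
    """Resolve e.g. 'encoder.layers.0.attention' starting from `root`."""
    return functools.reduce(getattr, dotted_name.split('.'), root)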
| 10 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : List[Any] = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 366 |
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger('''transformers.models.speecht5''')
_lowerCAmelCase : int = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
_lowerCAmelCase : str = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
_lowerCAmelCase : int = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
_lowerCAmelCase : Union[str, Any] = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
_lowerCAmelCase : Union[str, Any] = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
_lowerCAmelCase : int = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
_lowerCAmelCase : Any = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
_lowerCAmelCase : List[str] = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
_lowerCAmelCase : Optional[Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_lowerCAmelCase : Dict = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Tuple = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
_lowerCAmelCase : Tuple = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
_lowerCAmelCase : int = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
_lowerCAmelCase : Optional[int] = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ) -> Optional[Any]:
for attribute in key.split("." ):
A_ : List[Any] = getattr(_lowerCAmelCase , _lowerCAmelCase )
if weight_type is not None:
A_ : Tuple = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape
else:
A_ : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}" )
if weight_type == "weight":
A_ : Dict = value
elif weight_type == "weight_g":
A_ : int = value
elif weight_type == "weight_v":
A_ : str = value
elif weight_type == "bias":
A_ : int = value
elif weight_type == "running_mean":
A_ : str = value
elif weight_type == "running_var":
A_ : Any = value
elif weight_type == "num_batches_tracked":
A_ : str = value
else:
A_ : int = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int ) -> Union[str, Any]:
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
A_ , A_ : Tuple = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
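# Illustrative behaviour of the pattern matching above (hypothetical key and
# pattern strings, shown only to document the three branches):
#   should_ignore("encoder.version", ["encoder.version"])                    -> True  (exact match)
#   should_ignore("text_decoder_prenet.embed", ["text_decoder_prenet.*"])    -> True  (trailing wildcard)
#   should_ignore("decoder.layers.3.norm_k.weight",
#                 ["decoder.layers.*.norm_k.weight"])                        -> True  (infix wildcard)
#   should_ignore("encoder.layer_norm", ["decoder.*"])                       -> False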
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ) -> Union[str, Any]:
A_ : Tuple = []
if task == "s2t":
A_ : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder
A_ : str = MAPPING_S2T
A_ : Union[str, Any] = IGNORE_KEYS_S2T
elif task == "t2s":
A_ : Optional[int] = None
A_ : Dict = MAPPING_T2S
A_ : Any = IGNORE_KEYS_T2S
elif task == "s2s":
A_ : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder
A_ : Dict = MAPPING_S2S
A_ : List[str] = IGNORE_KEYS_S2S
else:
raise ValueError(f"Unsupported task: {task}" )
for name, value in fairseq_dict.items():
if should_ignore(_lowerCAmelCase , _lowerCAmelCase ):
logger.info(f"{name} was ignored" )
continue
A_ : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
A_ : Tuple = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
A_ , A_ : Optional[Any] = key.split(".*." )
if prefix in name and suffix in name:
A_ : int = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
A_ : str = True
if "*" in mapped_key:
A_ : List[str] = name.split(_lowerCAmelCase )[0].split("." )[-2]
A_ : Optional[int] = mapped_key.replace("*" , _lowerCAmelCase )
if "weight_g" in name:
A_ : Union[str, Any] = "weight_g"
elif "weight_v" in name:
A_ : List[Any] = "weight_v"
elif "bias" in name:
A_ : Tuple = "bias"
elif "weight" in name:
A_ : List[Any] = "weight"
elif "running_mean" in name:
A_ : Union[str, Any] = "running_mean"
elif "running_var" in name:
A_ : Union[str, Any] = "running_var"
elif "num_batches_tracked" in name:
A_ : List[Any] = "num_batches_tracked"
else:
A_ : Optional[Any] = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
continue
if not is_used:
unused_weights.append(_lowerCAmelCase )
logger.warning(f"Unused weights: {unused_weights}" )
def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ) -> List[Any]:
A_ : int = full_name.split("conv_layers." )[-1]
A_ : Optional[Any] = name.split("." )
A_ : List[Any] = int(items[0] )
A_ : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
A_ : Optional[int] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
A_ : Optional[Any] = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
A_ : Tuple = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
A_ : Union[str, Any] = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCAmelCase )
@torch.no_grad()
def __snake_case ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str=None , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None , ) -> Optional[Any]:
if config_path is not None:
A_ : Dict = SpeechTaConfig.from_pretrained(_lowerCAmelCase )
else:
A_ : Optional[int] = SpeechTaConfig()
if task == "s2t":
A_ : Optional[Any] = config.max_text_positions
A_ : Optional[int] = SpeechTaForSpeechToText(_lowerCAmelCase )
elif task == "t2s":
A_ : str = 1876
A_ : List[str] = 600
A_ : List[str] = config.max_speech_positions
A_ : Tuple = SpeechTaForTextToSpeech(_lowerCAmelCase )
elif task == "s2s":
A_ : Optional[int] = 1876
A_ : int = config.max_speech_positions
A_ : Union[str, Any] = SpeechTaForSpeechToSpeech(_lowerCAmelCase )
else:
raise ValueError(f"Unknown task name: {task}" )
if vocab_path:
A_ : int = SpeechTaTokenizer(_lowerCAmelCase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
A_ : str = AddedToken("<mask>" , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase )
A_ : int = mask_token
tokenizer.add_special_tokens({"mask_token": mask_token} )
tokenizer.add_tokens(["<ctc_blank>"] )
A_ : int = SpeechTaFeatureExtractor()
A_ : Optional[Any] = SpeechTaProcessor(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase )
processor.save_pretrained(_lowerCAmelCase )
A_ : Union[str, Any] = torch.load(_lowerCAmelCase )
recursively_load_weights(fairseq_checkpoint["model"] , _lowerCAmelCase , _lowerCAmelCase )
model.save_pretrained(_lowerCAmelCase )
if repo_id:
print("Pushing to the hub..." )
processor.push_to_hub(_lowerCAmelCase )
model.push_to_hub(_lowerCAmelCase )
if __name__ == "__main__":
_lowerCAmelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
_lowerCAmelCase : Tuple = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 70 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str=[] ):
__UpperCamelCase =size[0] - overlap_pixels * 2
__UpperCamelCase =size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__UpperCamelCase =np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__UpperCamelCase =np.pad(SCREAMING_SNAKE_CASE__ , mode='linear_ramp' , pad_width=SCREAMING_SNAKE_CASE__ , end_values=0 )
if "l" in remove_borders:
__UpperCamelCase =mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__UpperCamelCase =mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__UpperCamelCase =mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__UpperCamelCase =mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
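# A quick sketch of the mask this helper produces (illustrative numbers only):
# with size=(6, 6) and overlap_pixels=2, a 2x2 block of 255s is padded with a
# linear ramp down to 0, giving a 6x6 alpha mask that fades out at every edge;
# edges named in remove_borders stay fully opaque instead, e.g.
#   make_transparency_mask((6, 6), 2, remove_borders=["l"])  # no fade on the left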
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ):
return max(SCREAMING_SNAKE_CASE__ , min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : [int] ):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : [int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : [int] ):
__UpperCamelCase =list(SCREAMING_SNAKE_CASE__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__UpperCamelCase =clamp_rect(SCREAMING_SNAKE_CASE__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE__ , (original_slice, 0) )
return result
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =(original_image_slice * 4, 0, tile.size[0], tile.size[1])
__UpperCamelCase =tile.crop(SCREAMING_SNAKE_CASE__ )
return tile
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =n % d
return n - divisor
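# Under the file's original naming this helper reads ``divisor = n % d`` and
# returns ``n - divisor``, i.e. it rounds ``n`` down to the nearest multiple
# of ``d`` (for example, n=130 and d=32 give 128).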
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ = 350 , ) -> Union[str, Any]:
super().__init__(
vae=A_ , text_encoder=A_ , tokenizer=A_ , unet=A_ , low_res_scheduler=A_ , scheduler=A_ , max_noise_level=A_ , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , **A_ ) -> Union[str, Any]:
torch.manual_seed(0 )
__UpperCamelCase =(
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__UpperCamelCase =add_overlap_rect(A_ , A_ , image.size )
__UpperCamelCase =image.crop(A_ )
__UpperCamelCase =((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__UpperCamelCase =translated_slice_x - (original_image_slice / 2)
__UpperCamelCase =max(0 , A_ )
__UpperCamelCase =squeeze_tile(A_ , A_ , A_ , A_ )
__UpperCamelCase =to_input.size
__UpperCamelCase =to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__UpperCamelCase =super(A_ , self ).__call__(image=A_ , **A_ ).images[0]
__UpperCamelCase =upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__UpperCamelCase =unsqueeze_tile(A_ , A_ )
__UpperCamelCase =upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__UpperCamelCase =[]
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__UpperCamelCase =Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=A_ ) , mode='L' , )
final_image.paste(
A_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , A_ )
@torch.no_grad()
def __call__( self , A_ , A_ , A_ = 75 , A_ = 9.0 , A_ = 50 , A_ = None , A_ = 1 , A_ = 0.0 , A_ = None , A_ = None , A_ = None , A_ = 1 , A_ = 128 , A_ = 32 , A_ = 32 , ) -> Tuple:
__UpperCamelCase =Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__UpperCamelCase =math.ceil(image.size[0] / tile_size )
__UpperCamelCase =math.ceil(image.size[1] / tile_size )
__UpperCamelCase =tcx * tcy
__UpperCamelCase =0
for y in range(A_ ):
for x in range(A_ ):
self._process_tile(
A_ , A_ , A_ , A_ , A_ , A_ , A_ , prompt=A_ , num_inference_steps=A_ , guidance_scale=A_ , noise_level=A_ , negative_prompt=A_ , num_images_per_prompt=A_ , eta=A_ , generator=A_ , latents=A_ , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _UpperCAmelCase ( ):
# Run a demo
__UpperCamelCase ='stabilityai/stable-diffusion-x4-upscaler'
__UpperCamelCase =StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE__ , revision='fp16' , torch_dtype=torch.floataa )
__UpperCamelCase =pipe.to('cuda' )
__UpperCamelCase =Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(SCREAMING_SNAKE_CASE__ : List[str] ):
print(F'progress: {obj["progress"]:.4f}' )
obj["image"].save('diffusers_library_progress.jpg' )
__UpperCamelCase =pipe(image=SCREAMING_SNAKE_CASE__ , prompt='Black font, white background, vector' , noise_level=40 , callback=SCREAMING_SNAKE_CASE__ )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 62 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 10**12 ):
__UpperCamelCase =1
__UpperCamelCase =0
__UpperCamelCase =1
__UpperCamelCase =1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
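# Under the original names this is the Pell-style recurrence for Project Euler
# problem 100: numerator/denominator step through the successive exact-50%
# blue-disc arrangements, and (denominator + 1) // 2 is the blue-disc count of
# the first arrangement whose total exceeds min_total. For example,
# solution(21) returns 85, the blue-disc count of the known 120-disc box.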
if __name__ == "__main__":
print(f"""{solution() = }""")
| 62 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( snake_case__ ):
_a : Optional[int] = (PNDMScheduler,)
_a : Optional[int] = (("""num_inference_steps""", 5_0),)
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
__lowerCAmelCase = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**_A )
return config
def __SCREAMING_SNAKE_CASE( self , _A=0 , **_A ):
"""simple docstring"""
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , _A )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config(**_A )
__lowerCAmelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__lowerCAmelCase = scheduler_class.from_pretrained(_A )
new_scheduler.set_timesteps(_A )
# copy over dummy past residuals
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
__lowerCAmelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__lowerCAmelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
__lowerCAmelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE( self , _A=0 , **_A ):
"""simple docstring"""
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , _A )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# copy over dummy past residuals (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(_A )
__lowerCAmelCase = scheduler_class.from_pretrained(_A )
# copy over dummy past residuals
new_scheduler.set_timesteps(_A )
# copy over dummy past residual (must be after setting timesteps)
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
__lowerCAmelCase = new_scheduler.step_prk(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__lowerCAmelCase = scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
__lowerCAmelCase = new_scheduler.step_plms(_A , _A , _A , **_A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __SCREAMING_SNAKE_CASE( self , **_A ):
"""simple docstring"""
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(**_A )
__lowerCAmelCase = scheduler_class(**_A )
__lowerCAmelCase = 1_0
__lowerCAmelCase = self.dummy_model()
__lowerCAmelCase = self.dummy_sample_deter
scheduler.set_timesteps(_A )
for i, t in enumerate(scheduler.prk_timesteps ):
__lowerCAmelCase = model(_A , _A )
__lowerCAmelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
__lowerCAmelCase = model(_A , _A )
__lowerCAmelCase = scheduler.step_plms(_A , _A , _A ).prev_sample
return sample
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = dict(self.forward_default_kwargs )
__lowerCAmelCase = kwargs.pop("num_inference_steps" , _A )
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_A )
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
if num_inference_steps is not None and hasattr(_A , "set_timesteps" ):
scheduler.set_timesteps(_A )
elif num_inference_steps is not None and not hasattr(_A , "set_timesteps" ):
__lowerCAmelCase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__lowerCAmelCase = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__lowerCAmelCase = dummy_past_residuals[:]
__lowerCAmelCase = scheduler.step_prk(_A , 0 , _A , **_A ).prev_sample
__lowerCAmelCase = scheduler.step_prk(_A , 1 , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__lowerCAmelCase = scheduler.step_plms(_A , 0 , _A , **_A ).prev_sample
__lowerCAmelCase = scheduler.step_plms(_A , 1 , _A , **_A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_A )
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config(steps_offset=1 )
__lowerCAmelCase = scheduler_class(**_A )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=_A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = 2_7
for scheduler_class in self.scheduler_classes:
__lowerCAmelCase = self.dummy_sample
__lowerCAmelCase = 0.1 * sample
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_A )
scheduler.set_timesteps(_A )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
__lowerCAmelCase = scheduler.step_prk(_A , _A , _A ).prev_sample
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaises(_A ):
__lowerCAmelCase = self.scheduler_classes[0]
__lowerCAmelCase = self.get_scheduler_config()
__lowerCAmelCase = scheduler_class(**_A )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.full_loop()
__lowerCAmelCase = torch.sum(torch.abs(_A ) )
__lowerCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2
assert abs(result_mean.item() - 0.25_80 ) < 1E-3
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
__lowerCAmelCase = torch.sum(torch.abs(_A ) )
__lowerCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 67.39_86 ) < 1E-2
assert abs(result_mean.item() - 0.08_78 ) < 1E-3
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
__lowerCAmelCase = torch.sum(torch.abs(_A ) )
__lowerCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2
assert abs(result_mean.item() - 0.29_95 ) < 1E-3
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.full_loop(set_alpha_to_one=_A , beta_start=0.01 )
__lowerCAmelCase = torch.sum(torch.abs(_A ) )
__lowerCAmelCase = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2
assert abs(result_mean.item() - 0.24_34 ) < 1E-3
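# A minimal sketch of the PRK -> PLMS flow these tests exercise, using only
# API surface that appears above (set_timesteps, prk_timesteps, plms_timesteps,
# step_prk, step_plms); torch.zeros_like stands in for a model forward pass:
#   scheduler = PNDMScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8)
#   for t in scheduler.prk_timesteps:
#       sample = scheduler.step_prk(torch.zeros_like(sample), t, sample).prev_sample
#   for t in scheduler.plms_timesteps:
#       sample = scheduler.step_plms(torch.zeros_like(sample), t, sample).prev_sample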
| 102 |
from pathlib import Path
import fire
from tqdm import tqdm
def _a ( SCREAMING_SNAKE_CASE_ : Dict="ro" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="en" , SCREAMING_SNAKE_CASE_ : Optional[Any]="wmt16" , SCREAMING_SNAKE_CASE_ : List[str]=None ):
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError("run pip install datasets" )
__lowerCAmelCase = F"""{src_lang}-{tgt_lang}"""
print(F"""Converting {dataset}-{pair}""" )
__lowerCAmelCase = datasets.load_dataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if save_dir is None:
__lowerCAmelCase = F"""{dataset}-{pair}"""
__lowerCAmelCase = Path(SCREAMING_SNAKE_CASE_ )
save_dir.mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
for split in ds.keys():
print(F"""Splitting {split} with {ds[split].num_rows} records""" )
# to save to val.source, val.target like summary datasets
__lowerCAmelCase = "val" if split == "validation" else split
__lowerCAmelCase = save_dir.joinpath(F"""{fn}.source""" )
__lowerCAmelCase = save_dir.joinpath(F"""{fn}.target""" )
__lowerCAmelCase = src_path.open("w+" )
__lowerCAmelCase = tgt_path.open("w+" )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__lowerCAmelCase = x["translation"]
src_fp.write(ex[src_lang] + "\n" )
tgt_fp.write(ex[tgt_lang] + "\n" )
print(F"""Saved {dataset} dataset to {save_dir}""" )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
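# Example invocation via python-fire (the script name and save_dir value are
# illustrative; flag names follow the parameters used in the function body):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en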
| 102 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class UpperCAmelCase_ ( unittest.TestCase ):
UpperCamelCase =inspect.getfile(accelerate.test_utils )
UpperCamelCase =os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
UpperCamelCase =["accelerate", "launch"]
UpperCamelCase =Path.home() / ".cache/huggingface/accelerate"
UpperCamelCase ="default_config.yaml"
UpperCamelCase =config_folder / config_file
UpperCamelCase =config_folder / "_default_config.yaml"
UpperCamelCase =Path("tests/test_configs" )
@classmethod
def _lowerCamelCase ( cls ) -> Dict:
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def _lowerCamelCase ( cls ) -> Union[str, Any]:
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def _lowerCamelCase ( self ) -> Any:
__lowercase : int = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def _lowerCamelCase ( self ) -> Optional[int]:
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=__snake_case ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(__snake_case ), self.test_file_path] , env=os.environ.copy() )
def _lowerCamelCase ( self ) -> Any:
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class UpperCAmelCase_ ( unittest.TestCase ):
UpperCamelCase ="test-tpu"
UpperCamelCase ="us-central1-a"
UpperCamelCase ="ls"
UpperCamelCase =["accelerate", "tpu-config"]
UpperCamelCase ="cd /usr/share"
UpperCamelCase ="tests/test_samples/test_command_file.sh"
UpperCamelCase ="Running gcloud compute tpus tpu-vm ssh"
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : List[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=__snake_case )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : List[str] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> str:
__lowercase : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Any:
__lowercase : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __snake_case , )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=__snake_case , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , __snake_case , )
| 249 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class A__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case = 1.0 , __snake_case = None , ):
super().__init__()
snake_case = initial_learning_rate
snake_case = warmup_steps
snake_case = power
snake_case = decay_schedule_fn
snake_case = name
def __call__( self , __snake_case ):
with tf.name_scope(self.name or '''WarmUp''' ) as name:
# Implements polynomial warmup, i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
snake_case = tf.cast(__snake_case , tf.floataa )
snake_case = tf.cast(self.warmup_steps , tf.floataa )
snake_case = global_step_float / warmup_steps_float
snake_case = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__snake_case , )
def a_ ( self ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
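# Worked example of the warmup above: with initial_learning_rate=1e-3,
# warmup_steps=100 and power=1.0, step 50 yields 1e-3 * (50 / 100) ** 1 = 5e-4;
# from step 100 onwards the wrapped decay_schedule_fn takes over.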
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ ,UpperCamelCase_ = 0.0 ,UpperCamelCase_ = 0.9 ,UpperCamelCase_ = 0.999 ,UpperCamelCase_ = 1e-8 ,UpperCamelCase_ = None ,UpperCamelCase_ = None ,UpperCamelCase_ = 0.0 ,UpperCamelCase_ = 1.0 ,UpperCamelCase_ = None ,):
"""simple docstring"""
snake_case = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCamelCase_ ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=UpperCamelCase_ ,)
if num_warmup_steps:
snake_case = WarmUp(
initial_learning_rate=UpperCamelCase_ ,decay_schedule_fn=UpperCamelCase_ ,warmup_steps=UpperCamelCase_ ,)
if weight_decay_rate > 0.0:
snake_case = AdamWeightDecay(
learning_rate=UpperCamelCase_ ,weight_decay_rate=UpperCamelCase_ ,beta_a=UpperCamelCase_ ,beta_a=UpperCamelCase_ ,epsilon=UpperCamelCase_ ,clipnorm=UpperCamelCase_ ,global_clipnorm=UpperCamelCase_ ,exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''] ,include_in_weight_decay=UpperCamelCase_ ,)
else:
snake_case = tf.keras.optimizers.Adam(
learning_rate=UpperCamelCase_ ,beta_a=UpperCamelCase_ ,beta_a=UpperCamelCase_ ,epsilon=UpperCamelCase_ ,clipnorm=UpperCamelCase_ ,global_clipnorm=UpperCamelCase_ ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
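# Hedged usage sketch (under the original transformers naming this factory is
# create_optimizer; the numeric values below are illustrative):
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5,
#       num_train_steps=10_000,
#       num_warmup_steps=500,
#       weight_decay_rate=0.01,
#   )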
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self , __snake_case = 0.001 , __snake_case = 0.9 , __snake_case = 0.999 , __snake_case = 1E-7 , __snake_case = False , __snake_case = 0.0 , __snake_case = None , __snake_case = None , __snake_case = "AdamWeightDecay" , **__snake_case , ):
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
snake_case = weight_decay_rate
snake_case = include_in_weight_decay
snake_case = exclude_from_weight_decay
@classmethod
def a_ ( cls , __snake_case ):
snake_case = {'''WarmUp''': WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
snake_case = tf.constant(
self.weight_decay_rate , name='''adam_weight_decay_rate''' )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
snake_case = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
return tf.no_op()
def a_ ( self , __snake_case , __snake_case=None , **__snake_case ):
snake_case , snake_case = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
snake_case = apply_state or {}
snake_case = apply_state.get((var_device, var_dtype) )
if coefficients is None:
snake_case = self._fallback_apply_state(__snake_case , __snake_case )
snake_case = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def a_ ( self , __snake_case , __snake_case , __snake_case=None ):
snake_case , snake_case = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
snake_case = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case=None ):
snake_case , snake_case = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
snake_case = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def a_ ( self ):
snake_case = super().get_config()
config.update({'''weight_decay_rate''': self.weight_decay_rate} )
return config
def a_ ( self , __snake_case ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class A__ ( snake_case__ ):
"""simple docstring"""
def __init__( self ):
snake_case = []
snake_case = None
@property
def a_ ( self ):
if self._accum_steps is None:
snake_case = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def a_ ( self ):
if not self._gradients:
raise ValueError('''The accumulator should be called first to initialize the gradients''' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self , __snake_case ):
if not self._gradients:
snake_case = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(__snake_case )}''' )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def a_ ( self ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__snake_case ) )
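# Hedged usage sketch of the accumulator above (under its original transformers
# name GradientAccumulator; compute_gradients is a hypothetical helper):
#   accumulator = GradientAccumulator()
#   for batch in dataset:
#       grads = compute_gradients(model, batch)  # hypothetical: grads via tf.GradientTape
#       accumulator(grads)                       # __call__ adds into the running sums
#   optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#   accumulator.reset()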
| 127 | 0 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
lowercase : List[str] = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
lowercase : List[Any] = {
"ctrl": 256,
}
lowercase : Any = {
"Pregnancy": 168629,
"Christianity": 7675,
"Explain": 106423,
"Fitness": 63440,
"Saving": 63163,
"Ask": 27171,
"Ass": 95985,
"Joke": 163509,
"Questions": 45622,
"Thoughts": 49605,
"Retail": 52342,
"Feminism": 164338,
"Writing": 11992,
"Atheism": 192263,
"Netflix": 48616,
"Computing": 39639,
"Opinion": 43213,
"Alone": 44967,
"Funny": 58917,
"Gaming": 40358,
"Human": 4088,
"India": 1331,
"Joker": 77138,
"Diet": 36206,
"Legal": 11859,
"Norman": 4939,
"Tip": 72689,
"Weight": 52343,
"Movies": 46273,
"Running": 23425,
"Science": 2090,
"Horror": 37793,
"Confession": 60572,
"Finance": 12250,
"Politics": 16360,
"Scary": 191985,
"Support": 12654,
"Technologies": 32516,
"Teenage": 66160,
"Event": 32769,
"Learned": 67460,
"Notion": 182770,
"Wikipedia": 37583,
"Books": 6665,
"Extract": 76050,
"Confessions": 102701,
"Conspiracy": 75932,
"Links": 63674,
"Narcissus": 150425,
"Relationship": 54766,
"Relationships": 134796,
"Reviews": 41671,
"News": 4256,
"Translation": 26820,
"multilingual": 128406,
}
def UpperCAmelCase_ (_lowerCAmelCase : List[str] ):
__UpperCamelCase : List[Any] = set()
__UpperCamelCase : str = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__UpperCamelCase : Union[str, Any] = char
__UpperCamelCase : str = set(_lowerCAmelCase )
return pairs
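# Illustration of the symbol-pair extraction above (the name get_pairs follows
# its call site in the tokenizer below):
#   get_pairs("hello") -> {("h", "e"), ("e", "l"), ("l", "o")}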
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ):
"""simple docstring"""
lowercase : List[str] = VOCAB_FILES_NAMES
lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : str = CONTROL_CODES
def __init__( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="<unk>" , **__UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__(unk_token=__UpperCamelCase , **__UpperCamelCase )
with open(__UpperCamelCase , encoding="utf-8" ) as vocab_handle:
__UpperCamelCase : Optional[Any] = json.load(__UpperCamelCase )
__UpperCamelCase : List[str] = {v: k for k, v in self.encoder.items()}
with open(__UpperCamelCase , encoding="utf-8" ) as merges_handle:
__UpperCamelCase : Union[str, Any] = merges_handle.read().split("\n" )[1:-1]
__UpperCamelCase : int = [tuple(merge.split() ) for merge in merges]
__UpperCamelCase : Optional[int] = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__UpperCamelCase : List[Any] = {}
@property
def __lowerCamelCase ( self ) -> str:
'''simple docstring'''
return len(self.encoder )
def __lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__UpperCamelCase : Optional[Any] = tuple(__UpperCamelCase )
__UpperCamelCase : Tuple = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
__UpperCamelCase : List[str] = get_pairs(__UpperCamelCase )
if not pairs:
return token
while True:
__UpperCamelCase : Optional[Any] = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__UpperCamelCase , __UpperCamelCase : Optional[Any] = bigram
__UpperCamelCase : int = []
__UpperCamelCase : str = 0
while i < len(__UpperCamelCase ):
try:
__UpperCamelCase : Dict = word.index(__UpperCamelCase , __UpperCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__UpperCamelCase : Tuple = j
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__UpperCamelCase : str = tuple(__UpperCamelCase )
__UpperCamelCase : List[str] = new_word
if len(__UpperCamelCase ) == 1:
break
else:
__UpperCamelCase : Union[str, Any] = get_pairs(__UpperCamelCase )
__UpperCamelCase : Optional[int] = "@@ ".join(__UpperCamelCase )
__UpperCamelCase : Any = word[:-4]
__UpperCamelCase : Dict = word
return word
def __lowerCamelCase ( self , __UpperCamelCase ) -> Optional[Any]:
'''simple docstring'''
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Tuple = re.findall(r"\S+\n?" , __UpperCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCamelCase ).split(" " ) ) )
return split_tokens
def __lowerCamelCase ( self , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , __UpperCamelCase ) -> Tuple:
'''simple docstring'''
return self.decoder.get(__UpperCamelCase , self.unk_token )
def __lowerCamelCase ( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = " ".join(__UpperCamelCase ).replace("@@ " , "" ).strip()
return out_string
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__UpperCamelCase : Optional[int] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : List[Any] = os.path.join(
__UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + "\n" )
__UpperCamelCase : Dict = 0
with open(__UpperCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__UpperCamelCase : Any = token_index
writer.write(" ".join(__UpperCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 171 |
def UpperCAmelCase_ (_lowerCAmelCase : list ):
if len(_lowerCAmelCase ) <= 1:
return lst
__UpperCamelCase : Dict = 1
while i < len(_lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__UpperCamelCase , __UpperCamelCase : Union[str, Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
__UpperCamelCase : Any = 1
return lst
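# Example: gnome_sort([3, 1, 2]) returns [1, 2, 3]; the sort works in place and
# takes O(n^2) comparisons in the worst case.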
if __name__ == "__main__":
lowercase : Dict = input("Enter numbers separated by a comma:\n").strip()
lowercase : Union[str, Any] = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 171 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__UpperCAmelCase : Optional[int] = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : int , A : Optional[int] , A : Any , A : Tuple=None , A : Tuple=1 ):
__snake_case: Optional[int] = tokenizer
__snake_case: str = dataset
__snake_case: List[str] = len(__a ) if n_tasks is None else n_tasks
__snake_case: Union[str, Any] = n_copies
def __iter__( self : List[str] ):
__snake_case: Union[str, Any] = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["""prompt"""].strip() )
__snake_case: Optional[int] = self.tokenizer(__a , padding=__a , return_tensors="""pt""" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , A : int , A : Optional[Any] , A : Optional[Any] ):
__snake_case: Optional[int] = start_length
__snake_case: int = eof_strings
__snake_case: List[str] = tokenizer
def __call__( self : Optional[Any] , A : str , A : Tuple , **A : str ):
__snake_case: List[str] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
__snake_case: Optional[Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__a )
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
__snake_case: Any = re.split("""(%s)""" % """|""".join(__snake_case) , __snake_case)
# last string should be ""
return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, gathering the results across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # let the stopping criteria know where the prompt ends
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 111 |
"""simple docstring"""
from graphs.minimum_spanning_tree_kruskal import kruskal
def lowerCamelCase__ ( ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = 9
_UpperCamelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
_UpperCamelCase = kruskal(__snake_case, __snake_case )
_UpperCamelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(__snake_case ) == sorted(__snake_case )
| 194 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
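# Illustrative usage (assuming this file is transformers/models/gpt_sw3/__init__.py):
#   from transformers.models.gpt_sw3 import GPTSw3Tokenizer
# only triggers the submodule import above on first attribute access.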
| 369 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, _ = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)

            if "norm1" in old_name:
                trimmed_name = trimmed_name.replace("norm1", "layernorm1")
            elif "norm2" in old_name:
                trimmed_name = trimmed_name.replace("norm2", "layernorm2")
            elif "fc1" in old_name:
                trimmed_name = trimmed_name.replace("fc1", "linear_in")
            elif "fc2" in old_name:
                trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
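# Illustrative example (hypothetical key): rename_key("patch_embed.0.weight", 2)
# walks the rules above and returns "efficientformer.patch_embed.convolution1.weight".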
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val

    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)

    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 107 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 28 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise
    # to generate masks during test
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(lowerCAmelCase_ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 180 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How many images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
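# Illustrative usage of image_grid (hypothetical PIL images, not part of the
# original script): four equally sized images tile into one 2x2 sheet, and a
# mismatched count raises ValueError:
#   sheet = image_grid([img1, img2, img3, img4], rows=2, cols=2)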
| 350 |
from PIL import Image


def mean_threshold(image: Image) -> Image:
    """
    image: is a greyscale PIL image object; binarize it around its mean pixel value.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 243 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
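# Illustrative usage (default values are those in the signature above):
#   config = RwkvConfig()
#   assert config.attention_hidden_size == config.hidden_size
#   assert config.intermediate_size == 4 * config.hidden_size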
| 48 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()
    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
def __UpperCAmelCase ( self : List[str] ) -> Optional[int]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> Any:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : int ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = False
a = max_length
a = 2
a = 2
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
a = True
a = max_length
a = 0.8
a = 10
a = 0.3
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a , a , a , a = self._get_input_ids_and_config()
a = max_length
a = 2
a = 1
a = 8
a = 9
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = False
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = True
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
a , a , a , a = self._get_input_ids_and_config()
# pad attention mask on the left
a = attention_mask.at[(0, 0)].set(0 )
a = 2
a = max_length
for model_class in self.all_generative_model_classes:
a = model_class(__lowerCamelCase )
a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase )
a = jit(model.generate )
a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 107 | 0 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the gross price for a net ``price`` at the given ``tax_rate``."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 350 |
import argparse
import copy
def generate_neighbours(path):
    """Build, from the input file, a dict mapping each node to its [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
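# Illustrative input file contents (hypothetical; one weighted edge per line,
# parsed above as "node_a node_b distance"):
#   a b 20
#   a c 18
#   b c 10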
def generate_first_solution(path, dict_of_neighbours):
    """Greedily build an initial tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []

    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Return all tours obtained by swapping two interior nodes, each with its total distance appended."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0

            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for ``iters`` iterations with a tabu list of at most ``size`` swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 213 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_snake_case = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_snake_case = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
_snake_case = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}), codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"], reference_urls=[
"https://github.com/m-popovic/chrF",
], )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 36 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
| 221 | 0 |
"""simple docstring"""
def excel_title_to_column(column_title: str) -> int:
    """
    Convert an Excel-style column title (e.g. "A", "AB") to its column number.

    >>> excel_title_to_column("A")
    1
    >>> excel_title_to_column("AB")
    28
    >>> excel_title_to_column("ZZ")
    702
    """
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        # Each letter contributes (letter value) * 26**position, counted from the right.
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
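
# Quick sanity check (the conversion is base-26 with A=1 ... Z=26):
# excel_title_to_column("AB") == 28  ->  "A" contributes 1 * 26, "B" contributes 2 * 1.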
if __name__ == "__main__":
from doctest import testmod
testmod()
| 163 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
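
# Usage sketch (assumes a SentencePiece character model is available on disk, e.g. the
# `spm_char.model` shipped with the microsoft/speecht5_tts checkpoint; calls are illustrative):
#
#     tokenizer = SpeechT5Tokenizer("spm_char.model")
#     ids = tokenizer("hello world")["input_ids"]
#     print(tokenizer.decode(ids))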
| 163 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
a ="""\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
a ="""\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
a =r"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
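
# Usage sketch (assumes this module is loadable as the "competition_math" metric and that
# the `math_equivalence` dependency is installed, as in the docstring example above):
#
#     metric = datasets.load_metric("competition_math")
#     print(metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]))  # {'accuracy': 1.0}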
| 73 |
import csv
import tweepy
# Twitter API credentials
a =""""""
a =""""""
a =""""""
a =""""""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
# authorize twitter, initialize tweepy
__lowerCamelCase : Tuple = tweepy.OAuthHandler(lowerCamelCase__ , lowerCamelCase__ )
auth.set_access_token(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Optional[int] = tweepy.API(lowerCamelCase__ )
# initialize a list to hold all the tweepy Tweets
__lowerCamelCase : str = []
# make initial request for most recent tweets (200 is the maximum allowed count)
__lowerCamelCase : Union[str, Any] = api.user_timeline(screen_name=lowerCamelCase__ , count=2_0_0 )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# save the id of the oldest tweet less one
__lowerCamelCase : Any = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowerCamelCase__ ) > 0:
print(F"getting tweets before {oldest}" )
# all subsequent requests use the max_id param to prevent duplicates
__lowerCamelCase : str = api.user_timeline(
screen_name=lowerCamelCase__ , count=2_0_0 , max_id=lowerCamelCase__ )
# save most recent tweets
alltweets.extend(lowerCamelCase__ )
# update the id of the oldest tweet less one
__lowerCamelCase : Optional[int] = alltweets[-1].id - 1
print(F"...{len(lowerCamelCase__ )} tweets downloaded so far" )
# transform the tweepy tweets into a 2D array that will populate the csv
__lowerCamelCase : str = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(F"new_{screen_name}_tweets.csv" , 'w' ) as f:
__lowerCamelCase : Any = csv.writer(lowerCamelCase__ )
writer.writerow(['id', 'created_at', 'text'] )
writer.writerows(lowerCamelCase__ )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 73 | 1 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text: str, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
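
# Usage sketch (assumes a pretrained Whisper checkpoint is available; `waveform` is a
# hypothetical 1-D float array of 16 kHz audio, not defined in this module):
#
#     processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#     inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")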
| 180 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorisation of n as a list, smallest factor first."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
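
# Example: prime_factors(360) -> [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5.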
if __name__ == "__main__":
import doctest
doctest.testmod()
| 180 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 13 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 238 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 356 |
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num
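
# Example: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.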
if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
| 205 | 0 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 138 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)


@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features


hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 138 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = decoder_layers
        self.decoder_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 273 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
| 273 | 1 |
"""simple docstring"""
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
    from ..pytorch_utils import Conv1D
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Returns the module names (last module / tied weights such as the lm_head) to keep in full precision."""
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
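

# --- Usage sketch (illustrative only; the toy module and config values are
# assumptions, not part of the utilities above). In practice this conversion
# happens inside `from_pretrained(..., load_in_8bit=True)`:
#
# import torch.nn as nn
# from transformers import BitsAndBytesConfig
#
# toy = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 16))
# bnb_config = BitsAndBytesConfig(load_in_8bit=True)
# toy = replace_with_bnb_linear(toy, quantization_config=bnb_config)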
| 98 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """A Bezier curve defined by a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns the values of the Bernstein basis polynomials at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """Returns the (x, y) point on the curve at time t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plots the curve together with its control points."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
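    # Non-graphical sanity check (illustrative values): the midpoint of a
    # degree-1 curve is the average of its two control points.
    assert BezierCurve([(1, 1), (3, 3)]).bezier_curve_function(0.5) == (2.0, 2.0)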
| 251 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate, using the learning rate set in the optimizer."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Create a schedule with a constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Create a piecewise constant schedule from a rule string such as "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Create a schedule whose learning rate decreases linearly to 0 after a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Create a schedule whose learning rate follows a cosine decay after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Create a schedule with several hard cosine restarts after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Create a schedule with a polynomially decaying learning rate after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified entry point that builds the scheduler named by `name` from its hyperparameters."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
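

# --- Usage sketch (assumed values, for illustration only): a linear warmup
# for 100 steps followed by linear decay to zero over the remaining steps.
#
# import torch
#
# param = torch.nn.Parameter(torch.zeros(1))
# optimizer = torch.optim.AdamW([param], lr=1e-3)
# lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
# for _ in range(1000):
#     optimizer.step()
#     lr_scheduler.step()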
| 87 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
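
# Illustrative invocation (paths and task name are assumptions):
# python run_multiple_choice.py --task_name swag --model_name_or_path bert-base-uncased \
#     --data_dir ./swag_data --output_dir ./swag_out --max_seq_length 80 --do_train --do_eval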
| 87 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model-doc table of content by removing duplicate entries and sorting models alphabetically."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())
def check_model_doc(overwrite=False):
    """Checks (and optionally fixes) the model section of the doc table of content."""
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Extract the modalities and clean them one by one.
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
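
# Illustrative behaviour of `clean_model_doc_toc` on toy data: duplicate
# "local" entries are merged and the result is sorted by title.
#
# toc = [
#     {"local": "model_doc/bert", "title": "BERT"},
#     {"local": "model_doc/albert", "title": "ALBERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
# ]
# assert clean_model_doc_toc(toc) == [
#     {"local": "model_doc/albert", "title": "ALBERT"},
#     {"local": "model_doc/bert", "title": "BERT"},
# ]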
| 315 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")
def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetch GitHub info of the authenticated user using the requests module."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 |
def hamming_distance(string1: str, string2: str) -> int:
    """Calculate the Hamming distance between two equal-length strings."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
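    # Illustrative check: "karolin" and "kathrin" differ in 3 positions.
    assert hamming_distance("karolin", "kathrin") == 3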
| 306 | 0 |
from numpy import exp, pi, sqrt
def gaussian(x, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the Gaussian (normal) probability density function at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
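    # Illustrative check: the standard normal density at x = 0 equals 1/sqrt(2*pi).
    assert abs(gaussian(0) - 0.3989422804014327) < 1e-12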
| 302 |
import argparse
import hashlib
import io
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default download root is an assumption added so that the
    # one-argument call below stays runnable; pass an explicit cache directory in practice.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # named checkpoints ("tiny", "base", ...) are downloaded and deserialized from bytes
        original_checkpoint = torch.load(io.BytesIO(_download(_MODELS[checkpoint_path])), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 279 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of num must be multiplied together before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of num must be summed before reaching a single digit."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
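    # Illustrative checks: 39 -> 27 -> 14 -> 4 (three multiplicative steps)
    # and 39 -> 12 -> 3 (two additive steps).
    assert multiplicative_persistence(39) == 3
    assert additive_persistence(39) == 2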
| 361 |
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json''',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False,
        use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True,
        decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
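

# --- Usage sketch (illustrative): instantiating the default configuration;
# the assertions reflect the defaults defined above.
#
# config = MvpConfig()
# assert config.d_model == 1024 and config.use_prompt is False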
| 27 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Builds model inputs by adding special tokens: `<s> X </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
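

# --- Usage sketch (illustrative; requires downloading the pretrained vocabulary) ---
# tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
# tokenizer.tokenize("J'aime le camembert !")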
| 347 |
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation: computes a**b for integer exponents."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Extends actual_power to negative exponents by taking the reciprocal."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
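    # Illustrative checks: 2**3 == 8 and (-2)**(-3) == -0.125.
    assert power(2, 3) == 8
    assert power(-2, -3) == -0.125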
| 265 | 0 |
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F"{solution() = }")
| 358 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverse scheduler of `DDIMScheduler`, used for DDIM inversion."""

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02,
        beta_schedule: str = "linear", trained_betas=None, clip_sample: bool = True, set_alpha_to_zero: bool = True,
        steps_offset: int = 0, prediction_type: str = "epsilon", clip_sample_range: float = 1.0, **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Returns the sample unchanged; kept for interchangeability between schedulers."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Sets the discrete timesteps used for the diffusion chain (to be run before inference)."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0,
        use_clipped_model_output: bool = False, variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
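

# --- Usage sketch (illustrative): ten inversion steps with random stand-ins
# for the UNet noise predictions.
#
# scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     noise_pred = torch.randn_like(sample)  # stand-in for a UNet prediction
#     sample = scheduler.step(noise_pred, int(t), sample).prev_sample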
| 39 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
# NOTE: the original dummy class names were lost in this dump; the names below
# are representative placeholders for Flax dummy objects guarded by backends.
class FlaxDummyObject0(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class FlaxDummyObject3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 77 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
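

# Illustrative invocation (the test path is an assumption):
# python -m pytest tests/models/bert/test_modeling_flax_bert.py -k test_model_from_pretrained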
| 126 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with DDIM."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self, batch_size: int = 1, generator=None, eta: float = 0.0, num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None, output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
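

# --- Usage sketch (the model id is illustrative; weights are downloaded at run time) ---
# pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
# image = pipe(num_inference_steps=50).images[0]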
| 351 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    """Image processor that rescales pixel values and pads images to a multiple of `pad_size`."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs):
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        # Pad height and width up to the next multiple of `size` with symmetric reflection.
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
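# A worked example of the padding rule above: with pad_size=8, a 17 x 20 input
# gains (0, 7) rows and (0, 4) columns, yielding 24 x 24. Note that an input
# already divisible by 8 still gains a full extra block of 8, since the
# formula is (old // size + 1) * size - old.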
| 12 | 0 |
def triangle_number_generator():
    """Yield successive triangle numbers n * (n + 1) / 2."""
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Count the divisors of n via its prime factorisation."""
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors (Project Euler 12)."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
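# Quick sanity check for count_divisors (a sketch, not part of the script):
# 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6, matching its
# divisors 1, 2, 4, 7, 14, 28; 28 is also the first triangle number with more
# than five divisors.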
| 253 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], crop_pct: float, resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, crop_pct: float = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
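# A worked example of the crop_pct path in resize above: with
# shortest_edge=224 and the default crop_pct of 224/256, the shorter side is
# first resized to int(224 / (224 / 256)) = 256 and the image is then
# center-cropped to 224 x 224 -- the classic "resize to 256, crop to 224"
# evaluation recipe.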
| 331 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
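# In the inputs property above, "multiple-choice" exports rank-3 tensors of
# shape (batch, choice, sequence) while every other task exports rank-2
# (batch, sequence); the OrderedDict maps each dynamic dimension index to a
# symbolic name so the ONNX exporter marks those axes as variable-sized.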
| 263 |
"""simple docstring"""
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t, recording it in parent[].
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the edges of a minimum cut: the edges saturated by the maximum flow."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the path found by BFS.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
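# For the capacity matrix above (the classic CLRS flow network), the
# Ford-Fulkerson loop should saturate edges (1, 3), (4, 3) and (4, 5), whose
# capacities 12 + 7 + 4 = 23 equal the maximum flow, so the script is expected
# to print [(1, 3), (4, 3), (4, 5)].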
| 263 | 1 |
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the side lengths can form a polygon, i.e. the longest
    side is strictly shorter than the sum of the remaining sides."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
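# Usage sketch: check_polygon([1.5, 2.0, 3.0]) returns True since
# 3.0 < 1.5 + 2.0, while check_polygon([1.0, 1.0, 3.0]) returns False.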
| 214 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False}, )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('Does not support attention outputs')
def snake_case_ ( self):
pass
@unittest.skip
def snake_case_ ( self):
pass
@unittest.skip('Esm does not support embedding resizing')
def snake_case_ ( self):
pass
@unittest.skip('Esm does not support embedding resizing')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support passing input embeds!')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support head pruning.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.')
def snake_case_ ( self):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold only has one output format.')
def snake_case_ ( self):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold does not support input chunking.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.')
def snake_case_ ( self):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.')
def snake_case_ ( self):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def snake_case_ ( self):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 214 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
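# EXTENSIONS is bound after the class body (hence the "definition at the
# bottom of the script" note inside AudioFolder), keeping the long suffix
# list out of the class definition itself.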
| 332 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir into dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
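# Usage sketch (fire exposes minify as a CLI; the file name is an assumption):
#   python minify.py SRC_DIR DEST_DIR 100
# writes the first 100 lines of every file in SRC_DIR into DEST_DIR.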
| 332 | 1 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
SCREAMING_SNAKE_CASE : str = True
except (ImportError, AttributeError):
SCREAMING_SNAKE_CASE : Any = object
def lowercase ( *_snake_case : Tuple , **_snake_case : Optional[Any] ) ->Union[str, Any]:
"""simple docstring"""
pass
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Any = logging.get_logger("""transformers-cli/serving""")
def lowercase ( _snake_case : Namespace ) ->Tuple:
"""simple docstring"""
__snake_case : Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(_snake_case , args.host , args.port , args.workers )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
lowerCamelCase__ =42
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =42
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE (a_ ):
'''simple docstring'''
__snake_case : Dict = parser.add_parser(
'''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
serve_parser.add_argument(
'''--task''' , type=a_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
serve_parser.add_argument('''--host''' , type=a_ , default='''localhost''' , help='''Interface the server will listen on.''' )
serve_parser.add_argument('''--port''' , type=a_ , default=88_88 , help='''Port the serving will listen to.''' )
serve_parser.add_argument('''--workers''' , type=a_ , default=1 , help='''Number of http workers''' )
serve_parser.add_argument('''--model''' , type=a_ , help='''Model\'s name or path to stored model.''' )
serve_parser.add_argument('''--config''' , type=a_ , help='''Model\'s config name or path to stored model.''' )
serve_parser.add_argument('''--tokenizer''' , type=a_ , help='''Tokenizer name to use.''' )
serve_parser.add_argument(
'''--device''' , type=a_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
serve_parser.set_defaults(func=a_ )
def __init__(self , a_ , a_ , a_ , a_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = pipeline
__snake_case : int = host
__snake_case : Any = port
__snake_case : Tuple = workers
if not _serve_dependencies_installed:
raise RuntimeError(
'''Using serve command requires FastAPI and uvicorn. '''
'''Please install transformers with [serving]: pip install "transformers[serving]".'''
'''Or install FastAPI and uvicorn separately.''' )
else:
logger.info(f"""Serving model over {host}:{port}""" )
__snake_case : Dict = FastAPI(
routes=[
APIRoute(
'''/''' , self.model_info , response_model=a_ , response_class=a_ , methods=['''GET'''] , ),
APIRoute(
'''/tokenize''' , self.tokenize , response_model=a_ , response_class=a_ , methods=['''POST'''] , ),
APIRoute(
'''/detokenize''' , self.detokenize , response_model=a_ , response_class=a_ , methods=['''POST'''] , ),
APIRoute(
'''/forward''' , self.forward , response_model=a_ , response_class=a_ , methods=['''POST'''] , ),
] , timeout=6_00 , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
run(self._app , host=self.host , port=self.port , workers=self.workers )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def SCREAMING_SNAKE_CASE (self , a_ = Body(a_ , embed=a_ ) , a_ = Body(a_ , embed=a_ ) ):
'''simple docstring'''
try:
__snake_case : int = self._pipeline.tokenizer.tokenize(a_ )
if return_ids:
__snake_case : Union[str, Any] = self._pipeline.tokenizer.convert_tokens_to_ids(a_ )
return ServeTokenizeResult(tokens=a_ , tokens_ids=a_ )
else:
return ServeTokenizeResult(tokens=a_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(a_ )} )
def SCREAMING_SNAKE_CASE (self , a_ = Body(a_ , embed=a_ ) , a_ = Body(a_ , embed=a_ ) , a_ = Body(a_ , embed=a_ ) , ):
'''simple docstring'''
try:
__snake_case : List[Any] = self._pipeline.tokenizer.decode(a_ , a_ , a_ )
return ServeDeTokenizeResult(model='''''' , text=a_ )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(a_ )} )
async def SCREAMING_SNAKE_CASE (self , a_=Body(a_ , embed=a_ ) ):
'''simple docstring'''
if len(a_ ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
__snake_case : Dict = self._pipeline(a_ )
return ServeForwardResult(output=a_ )
except Exception as e:
raise HTTPException(5_00 , {'''error''': str(a_ )} )
| 102 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 102 | 1 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=2, seq_length=8, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=16, num_hidden_layers=5, num_attention_heads=2, intermediate_size=36, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4_096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50_265
        expected_shape = torch.Size((1, 4_096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
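# The three integration tests above share one recipe: run a fixed torch.arange
# input through a pretrained checkpoint, check the output shape, and compare a
# 3 x 3 slice of the output against hard-coded reference values with atol=1e-4.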
| 367 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx")
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
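# Both nightly tests above follow the same flow: download the sketch image,
# run img2img with a fixed RandomState(0) seed, and compare a small slice of
# the result against reference values; the loose 2e-2 tolerance tracks the
# onnxruntime reproducibility issues flagged in the TODO comments.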
| 43 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('.csv' ):
# Loading a dataset from local csv files
lowercase__: Optional[int] = load_dataset('csv' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowercase__: List[Any] = load_dataset('json' , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowercase__: List[str] = raw_datasets["train"].features["label"].names
lowercase__: Tuple = len(__lowerCamelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
    model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
tokenizer = TapexTokenizer.from_pretrained(
    model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
model = BartForSequenceClassification.from_pretrained(
    model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
if data_args.pad_to_max_length:
    padding = "max_length"
else:
    # We will pad later, dynamically at batch creation, to the max sequence length in each batch
    padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
model.config.label2id = {"Refused": 0, "Entailed": 1}
model.config.id2label = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the '
        f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(examples):
    # Tokenize the texts
    def _convert_table_text_to_pandas(_table_text):
        _table_content = [_table_row.split('#') for _table_row in _table_text.strip('\n').split('\n')]
        _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
        return _table_pd
    questions = examples["statement"]
    tables = list(map(_convert_table_text_to_pandas, examples['table_text']))
    result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
    result["label"] = examples["label"]
    return result
with training_args.main_process_first(desc='dataset map pre-processing'):
    raw_datasets = raw_datasets.map(
        preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc='Running tokenizer on dataset', )
if training_args.do_train:
    if "train" not in raw_datasets:
        raise ValueError('--do_train requires a train dataset')
    train_dataset = raw_datasets["train"]
    if data_args.max_train_samples is not None:
        train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
    if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
        raise ValueError('--do_eval requires a validation dataset')
    eval_dataset = raw_datasets["validation"]
    if data_args.max_eval_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
    if "test" not in raw_datasets and "test_matched" not in raw_datasets:
        raise ValueError('--do_predict requires a test dataset')
    predict_dataset = raw_datasets["test"]
    if data_args.max_predict_samples is not None:
        predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
    for index in random.sample(range(len(train_dataset)), 3):
        logger.info(f'Sample {index} of the training set: {train_dataset[index]}.')
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p):
    preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
    preds = np.argmax(preds, axis=1)
    return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
    data_collator = default_data_collator
elif training_args.fp16:
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
    data_collator = None
# Initialize our Trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
    checkpoint = None
    if training_args.resume_from_checkpoint is not None:
        checkpoint = training_args.resume_from_checkpoint
    elif last_checkpoint is not None:
        checkpoint = last_checkpoint
    train_result = trainer.train(resume_from_checkpoint=checkpoint)
    metrics = train_result.metrics
    max_train_samples = (
        data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
    )
    metrics["train_samples"] = min(max_train_samples, len(train_dataset))
    trainer.save_model()  # Saves the tokenizer too for easy upload
    trainer.log_metrics('train', metrics)
    trainer.save_metrics('train', metrics)
    trainer.save_state()
# Evaluation
if training_args.do_eval:
    logger.info('*** Evaluate ***')
    metrics = trainer.evaluate(eval_dataset=eval_dataset)
    max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
    metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
    trainer.log_metrics('eval', metrics)
    trainer.save_metrics('eval', metrics)
if training_args.do_predict:
    logger.info('*** Predict ***')
    # Removing the `label` columns because it contains -1 and Trainer won't like that.
    predict_dataset = predict_dataset.remove_columns('label')
    predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
    predictions = np.argmax(predictions, axis=1)
    output_predict_file = os.path.join(training_args.output_dir, 'predict_results_tabfact.txt')
    if trainer.is_world_process_zero():
        with open(output_predict_file, 'w') as writer:
            logger.info('***** Predict Results *****')
            writer.write('index\tprediction\n')
            for index, item in enumerate(predictions):
                item = label_list[item]
                writer.write(f'{index}\t{item}\n')
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
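# Added illustration (not part of the original script): the preprocessing above
# assumes each "table_text" encodes a table with "\n" between rows and "#"
# between cells, the first row being the header. A minimal standalone sketch of
# that conversion (the helper name below is ours; `pd` is the pandas import the
# script already relies on):
def _example_table_text_to_pandas(table_text: str):
    rows = [row.split('#') for row in table_text.strip('\n').split('\n')]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])
# _example_table_text_to_pandas("city#population\nparis#2148000\nberlin#3645000")
# -> a DataFrame with columns ["city", "population"] and two data rows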
| 196 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.", )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None, ):
        """simple docstring"""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        """simple docstring"""
        return self._images
    @property
    def labels(self):
        """simple docstring"""
        return self._labels
    @property
    def num_examples(self):
        """simple docstring"""
        return self._num_examples
    @property
    def epochs_completed(self):
        """simple docstring"""
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """simple docstring"""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, validation_size=5000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
    if fake_data:
        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f'{len(train_images)}. Received: {validation_size}.'
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
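# Added illustration (not part of the original module): _dense_to_one_hot relies
# on flat indexing, so row i gets a 1 at column labels_dense[i]. For example:
def _example_one_hot():
    example = _dense_to_one_hot(numpy.array([0, 2, 1]), 3)
    assert example.tolist() == [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0]]
    return example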
| 223 | 0 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))
def generate_key(message: str, key: str) -> str:
    '''simple docstring'''
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key
def cipher_text(message: str, key_new: str) -> str:
    '''simple docstring'''
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text
def original_text(cipher_text: str, key_new: str) -> str:
    '''simple docstring'''
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt
def main() -> None:
    '''simple docstring'''
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f'Encrypted Text = {s}')
    print(f'Original Text = {original_text(s, key_new)}')
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
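# Added illustration (not part of the original module): encryption maps each
# letter m to (m - k) % 26 and decryption inverts it with (c + k) % 26, so a
# round trip recovers the message (spaces are passed through untouched):
def _example_round_trip() -> None:
    message = "HELLO WORLD"
    key_new = generate_key(message, "SECRET")  # key repeated to message length
    encrypted = cipher_text(message, key_new)
    assert original_text(encrypted, key_new) == message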
| 370 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)
    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)
    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f'determinant modular {req_l} of encryption key({det}) '
                f'is not co prime w.r.t {req_l}.\nTry another key.'
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    '''simple docstring'''
    n = int(input("""Enter the order of the encryption key: """))
    hill_matrix = []
    print("""Enter each row of the encryption key with space separated integers""")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("""Would you like to encrypt or decrypt some text? (1 or 2)""")
    option = input("""\n1. Encrypt\n2. Decrypt\n""")
    if option == "1":
        text_e = input("""What text would you like to encrypt?: """)
        print("""Your encrypted text is:""")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("""What text would you like to decrypt?: """)
        print("""Your decrypted text is:""")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
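# Added illustration (not part of the original module): with the 2x2 key
# [[2, 5], [1, 6]] (determinant 7, coprime with 36) encryption and decryption
# are inverses, up to the padding that process_text may append:
def _example_hill_round_trip() -> None:
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    encrypted = hc.encrypt("HELLOWORLD")  # length 10, so no padding is added
    assert hc.decrypt(encrypted) == "HELLOWORLD"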
| 29 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    """simple docstring"""
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1) # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    """simple docstring"""
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(), )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
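# Added illustration (not part of the original module): _scale_box assumes boxes
# in (x0, y0, x1, y1) order and one (scale_y, scale_x) pair per row, so the x
# coordinates (columns 0 and 2) are multiplied by scale_yx[:, 1] and the y
# coordinates by scale_yx[:, 0]:
def _example_scale_box() -> torch.Tensor:
    boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
    scale_yx = torch.tensor([[0.5, 2.0]])  # halve y, double x
    return _scale_box(boxes, scale_yx)  # -> tensor([[20., 10., 60., 20.]])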
| 277 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_DESCRIPTION = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_KWARGS_DESCRIPTION = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    """simple docstring"""
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'),
                }), )
    def _compute(self, references: List[List[List[str]]], predictions: List[List[str]], min_len: int = 1, max_len: int = 4, ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len)
        }
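# Added illustration (not part of the original module): the metric is a thin
# wrapper around NLTK's corpus-level GLEU, which can also be called directly,
# e.g. (token lists are made up):
#
#   from nltk.translate import gleu_score
#   hyp = ['my', 'name', 'is', 'sarah']
#   ref = ['my', 'name', 'is', 'sara']
#   gleu_score.corpus_gleu(list_of_references=[[ref]], hypotheses=[hyp])
#   # -> 0.6: 6 matching n-grams out of max(10, 10) total n-grams for n = 1..4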
| 277 | 1 |
"""simple docstring"""
def is_even(number: int) -> bool:
    '''simple docstring'''
    return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
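# Added illustration (not part of the original module): `number & 1` isolates
# the least significant bit, which is 0 exactly for even integers, including
# negatives under Python's integer semantics:
assert is_even(4) and not is_even(7) and is_even(-2)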
| 175 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/config.json''',
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }
    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
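# Added illustration (not part of the original module): instantiating the config
# with its defaults mirrors the facebook/xglm-564M architecture, and the
# attribute_map above aliases the usual names, e.g.:
#
#   config = XGLMConfig()
#   config.hidden_size == config.d_model             # True, via attribute_map
#   config.num_hidden_layers == config.num_layers    # True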
| 175 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def snake_case ( self ):
__lowerCAmelCase = NllbTokenizer(__a , keep_accents=__a )
__lowerCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowerCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__lowerCAmelCase = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def snake_case ( self ):
__lowerCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a )
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__lowerCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it save with the same files
self.assertSequenceEqual(__a , __a )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = tokenizer_r.save_pretrained(__a , legacy_format=__a )
__lowerCAmelCase = tokenizer_p.save_pretrained(__a )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowerCAmelCase = tokenizer_r.from_pretrained(__a )
__lowerCAmelCase = tokenizer_p.from_pretrained(__a )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__a , __a ) )
shutil.rmtree(__a )
@require_torch
def snake_case ( self ):
if not self.test_seqaseq:
return
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Longer text that will definitely require truncation.
__lowerCAmelCase = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
__lowerCAmelCase = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__a , tgt_texts=__a , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
__a , tgt_texts=__a , max_length=3 , return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
__lowerCAmelCase = tokenizer.prepare_seqaseq_batch(
src_texts=__a , max_length=3 , max_target_length=10 , return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn("decoder_input_ids" , __a )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def snake_case ( self ):
pass
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase = [AddedToken("<special>" , lstrip=__a )]
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
__lowerCAmelCase = tokenizer_r.encode("Hey this is a <special> token" )
__lowerCAmelCase = tokenizer_r.encode("<special>" , add_special_tokens=__a )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , **__a )
__lowerCAmelCase = tokenizer_p.encode("Hey this is a <special> token" )
__lowerCAmelCase = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(__a , __a )
self.assertEqual(__a , __a )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn")
        cls.pad_token_id = 1
        return cls
def snake_case ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_60_57 )
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def snake_case ( self ):
self.assertIn(__a , self.tokenizer.all_special_ids )
# fmt: off
__lowerCAmelCase = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
__lowerCAmelCase = self.tokenizer.decode(__a , skip_special_tokens=__a )
__lowerCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def snake_case ( self ):
__lowerCAmelCase = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __a )
__lowerCAmelCase = 10
__lowerCAmelCase = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , __a )
self.assertEqual(len(__a ) , __a )
def snake_case ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [25_62_03, 3] )
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
__lowerCAmelCase = NllbTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__lowerCAmelCase = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(__a , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
__lowerCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="pt" )
__lowerCAmelCase = targets["input_ids"]
__lowerCAmelCase = shift_tokens_right(
__a , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"input_ids": [[25_60_47, 70, 73_56, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_60_57,
} , )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
__lowerCAmelCase = False
__lowerCAmelCase = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
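# Added illustration (not part of the original test file): shift_tokens_right
# from the M2M-100 module, as used in the tests above, prepends
# decoder_start_token_id and drops the last position, e.g. (ids are made up):
#
#   import torch
#   labels = torch.tensor([[5, 6, 2]])
#   shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=9)
#   # -> tensor([[9, 5, 6]])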
| 57 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key, torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """simple docstring"""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query, torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.key, torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.self_attention.value, torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size), )
    set_param(
        torch_layer.output.dense, torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1), )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm, torch.tensor(layer_norm_1_weight), torch.tensor(layer_norm_1_bias), )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm, torch.tensor(layer_norm_2_weight), torch.tensor(layer_norm_2_bias), )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense, torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(), torch.tensor(inter_dense_bias), )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense, torch.tensor(out_dense_weight).transpose(0, 1).contiguous(), torch.tensor(out_dense_bias), )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))

    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)

    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)

    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]]
        )  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 357 |
def decimal_to_binary(num: int) -> str:
    """Convert a (possibly negative) integer to its binary string, e.g. 10 -> "0b1010"."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 191 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
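# With the registry above, importing this package stays cheap: _LazyModule loads each
# submodule only on first attribute access (e.g. the first reference to PerceiverModel).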
| 33 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
| 27 | 0 |
def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
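# Worked example: binary_xor(25, 32) zero-pads to "011001" / "100000" and yields "0b111001",
# i.e. 25 ^ 32 == 57 — cross-check: bin(25 ^ 32) == "0b111001".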
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
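# Usage sketch (assumes accelerate state is initialized first, e.g. via Accelerator()):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process only")
#   logger.info("printed by every rank, in order", main_process_only=False, in_order=True)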
| 342 | 1 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1_024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
) -> Dict:
    """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json"""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length=1_024,
        type_path=type_path,
        n_obs=n_obs,
        prefix=prefix,
        **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences,
            num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
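# A minimal sketch of the `chunks` helper assumed above (the real one lives in utils.py):
# it regroups the flat list of batch_size * num_return_sequences predictions into
# per-example groups of num_return_sequences each.
# def chunks(lst, n):
#     for i in range(0, len(lst), n):
#         yield lst[i : i + n]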
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name",
        type=str,
        help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout",
        type=int,
        default=600,
        required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_results = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_results}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_results)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
    else:
        shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate partial results into one list, then sort it by id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
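# Example launch sketch (arguments are illustrative, not from a real run):
# python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#     --model_name sshleifer/distilbart-xsum-12-3 --data_dir xsum --save_dir tmp_gen --bs 8 --fp16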
| 182 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
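# The score above is the mean per-token log-likelihood of the labels (the loss is the
# negative log-likelihood, hence the leading minus); EXPECTED_SCORE is the reference value
# recorded from the original T5/Mesh-TensorFlow implementation of mt5-small.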
| 182 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(tf_input_feat_extract, tf_input_processor))
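# Shape sketch for the equivalence checks above: post_process_masks upsamples the toy
# (1, 3, 5, 5) mask logits back to each image's original size (1764, 2646), using
# reshaped_input_sizes (683, 1024) to undo the resize-and-pad; the PT and TF code paths
# must agree pixelwise.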
| 362 |
'''Nearest-neighbour image resize.'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to its nearest source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to its nearest source row."""
        return int(self.ratio_y * y)
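# A vectorised NumPy sketch of the same nearest-neighbour mapping (a hypothetical helper,
# not part of the original class): precompute the source indices once instead of looping.
def nearest_neighbour_fast(img, dst_w: int, dst_h: int):
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)  # source column per output column
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)  # source row per output row
    return img[ys[:, None], xs[None, :]]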
if __name__ == "__main__":
__UpperCAmelCase , __UpperCAmelCase =8_0_0, 6_0_0
__UpperCAmelCase =imread("image_data/lena.jpg", 1)
__UpperCAmelCase =NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 237 | 0 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """
    image: a grayscale PIL image object.
    Binarise the image: pixels above the global mean become 255, the rest 0.
    """
    width, height = image.size
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]  # PIL pixel access is (x, y)
            mean += pixel
    mean //= width * height

    for i in range(height):
        for j in range(width):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image
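# Equivalent NumPy sketch (hypothetical helper, assumes the image is first converted to a
# 2-D grayscale array with np.asarray(image)):
# np.where(arr > arr.mean(), 255, 0).astype(np.uint8)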
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 68 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    """Configuration class for the CANINE model."""
    model_type = "canine"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16_384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16_384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
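# Usage sketch (the values shown are just the constructor defaults above):
# config = CanineConfig()
# config.downsampling_rate   # 4 characters per downsampled position
# config.num_hash_buckets    # 16384 buckets for the character hash embeddings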
| 130 | 0 |
import sys
import turtle
def get_mid(p1, p2):
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
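# e.g. `python fractals.py 5` recurses 5 levels: each level replaces a triangle with three
# half-scale copies built from get_mid() midpoints, so 3**5 = 243 smallest triangles are drawn.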
| 273 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a pair of train/eval `DataLoader`s for GLUE MRPC using the bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
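# Note: the manual truncation in the eval loop above is exactly what
# `accelerator.gather_for_metrics((predictions, batch["labels"]))` automates — it drops the
# duplicate samples the distributed sampler appends to fill the last batch.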
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 226 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
for attribute in key.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
a_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase ):
a_ = PARAM_MAPPING[full_name.split("." )[-1]]
a_ = "param"
if weight_type is not None and weight_type != "param":
a_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
a_ = hf_pointer
for attribute in hf_param_name.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
a_ = shape_pointer.shape
# let's reduce dimension
a_ = value[0]
else:
a_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
a_ = value
elif weight_type == "weight_g":
a_ = value
elif weight_type == "weight_v":
a_ = value
elif weight_type == "bias":
a_ = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
a_ = getattr(UpperCAmelCase , UpperCAmelCase )
a_ = value
else:
a_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCAmelCase ):
a_ = PARAM_MAPPING[full_name.split("." )[-1]]
a_ = "param"
if weight_type is not None and weight_type != "param":
a_ = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
a_ = ".".join([key, hf_param_name] )
else:
a_ = key
a_ = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None ) ->Optional[Any]:
"""simple docstring"""
a_ = False
for key, mapped_key in MAPPING.items():
a_ = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a_ = True
if "*" in mapped_key:
a_ = name.split(UpperCAmelCase )[0].split("." )[-2]
a_ = mapped_key.replace("*" , UpperCAmelCase )
if "weight_g" in name:
a_ = "weight_g"
elif "weight_v" in name:
a_ = "weight_v"
elif "bias" in name:
a_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a_ = "weight"
else:
a_ = None
if hf_dict is not None:
rename_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return is_used
return is_used
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->str:
"""simple docstring"""
a_ = []
a_ = fairseq_model.state_dict()
a_ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
a_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == "group" , )
a_ = True
else:
a_ = load_wavaveca_layer(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not is_used:
unused_weights.append(UpperCAmelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
a_ = full_name.split("conv_layers." )[-1]
a_ = name.split("." )
a_ = int(items[0] )
a_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
a_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
a_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase )
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=False ) ->Tuple:
"""simple docstring"""
if config_path is not None:
a_ = WavaVecaConfig.from_pretrained(UpperCAmelCase )
else:
a_ = WavaVecaConfig()
if is_seq_class:
a_ = read_txt_into_dict(UpperCAmelCase )
a_ = idalabel
a_ = WavaVecaForSequenceClassification(UpperCAmelCase )
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
feature_extractor.save_pretrained(UpperCAmelCase )
elif is_finetuned:
if dict_path:
a_ = Dictionary.load(UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
a_ = target_dict.pad_index
a_ = target_dict.bos_index
a_ = target_dict.eos_index
a_ = len(target_dict.symbols )
a_ = os.path.join(UpperCAmelCase , "vocab.json" )
if not os.path.isdir(UpperCAmelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(UpperCAmelCase ) )
return
os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
a_ = target_dict.indices
# fairseq has the <pad> and <s> switched
a_ = 0
a_ = 1
with open(UpperCAmelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(UpperCAmelCase , UpperCAmelCase )
a_ = WavaVecaCTCTokenizer(
UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=UpperCAmelCase , )
a_ = True if config.feat_extract_norm == "layer" else False
a_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
a_ = WavaVecaProcessor(feature_extractor=UpperCAmelCase , tokenizer=UpperCAmelCase )
processor.save_pretrained(UpperCAmelCase )
a_ = WavaVecaForCTC(UpperCAmelCase )
else:
a_ = WavaVecaForPreTraining(UpperCAmelCase )
if is_finetuned or is_seq_class:
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a_ = argparse.Namespace(task="audio_pretraining" )
a_ = fairseq.tasks.setup_task(UpperCAmelCase )
a_ , a_ , a_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCAmelCase )
a_ = model[0].eval()
recursively_load_weights(UpperCAmelCase , UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
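# Added sketch (not part of the conversion script): the weight copying above works by
# walking dotted keys such as "encoder.layers.0.attention.k_proj" with repeated getattr.
# The bare pattern, in isolation:
def get_nested_attr(module, dotted_key):
    # follow each attribute in the dotted path, e.g. model -> encoder -> layers -> ...
    for attribute in dotted_key.split("."):
        module = getattr(module, attribute)
    return module


def set_nested_tensor(module, dotted_key, value):
    *parents, leaf = dotted_key.split(".")
    for attribute in parents:
        module = getattr(module, attribute)
    # assign to `.data` so the existing parameter registration is preserved
    getattr(module, leaf).data = value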
| 351 |
"""simple docstring"""
def binary_exponentiation(a, n, mod) ->int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
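# Added cross-check (illustrative): Python's built-in three-argument pow computes the same
# modular exponent, so the Fermat-inverse identity above can be verified against it.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)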
| 303 | 0 |
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    '''simple docstring'''
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )


def decrypt(message: str) -> str:
    '''simple docstring'''
    return "".join(REVERSE_DICT[char] for char in message.split() )


def main() -> None:
    '''simple docstring'''
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
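# Added check (illustrative): decrypt is the inverse of encrypt up to case, since encrypt
# upper-cases its input before looking characters up in MORSE_CODE_DICT.
def round_trip_holds(message: str) -> bool:
    return decrypt(encrypt(message)) == message.upper()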
| 72 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 262 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 1_0
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    '''simple docstring'''
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
else:
print("Not found")
| 355 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
A : Any = "bert-base-cased"
A : Any = "google/pegasus-xsum"
A : Union[str, Any] = [" Sam ate lunch today.", "Sams lunch ingredients."]
A : Union[str, Any] = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
A : Optional[int] = "patrickvonplaten/t5-tiny-random"
A : int = "sshleifer/bart-tiny-random"
A : Optional[int] = "sshleifer/tiny-mbart"
A : Any = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    '''simple docstring'''
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    '''simple docstring'''
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowerCAmelCase , __lowerCAmelCase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__a , __a )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowerCAmelCase = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in ARTICLES )
__lowerCAmelCase = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES )
__lowerCAmelCase = 4
__lowerCAmelCase = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=20 , max_target_length=__a , )
__lowerCAmelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def snake_case ( self ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowerCAmelCase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowerCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__a , __a , 1_28 , __a )
__lowerCAmelCase = {x.name for x in tmp_dir.iterdir()}
__lowerCAmelCase = {x.name for x in save_dir.iterdir()}
__lowerCAmelCase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a ) < len(__a )
assert len(__a ) == 1
assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" )
def snake_case ( self ):
if not FAIRSEQ_AVAILABLE:
return
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=64 )
__lowerCAmelCase = 64
__lowerCAmelCase = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a )
__lowerCAmelCase = [len(__a ) for x in batch_sampler]
assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__a ) == len(__a ) # no dropped or added examples
__lowerCAmelCase = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = []
__lowerCAmelCase = []
for batch in data_loader:
__lowerCAmelCase = batch["input_ids"].shape
__lowerCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowerCAmelCase = np.product(batch["input_ids"].shape )
num_src_per_batch.append(__a )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__a )
assert num_src_per_batch[0] == max(__a )
if failures:
raise AssertionError(f"too many tokens in {len(__a )} batches" )
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset(max_len=5_12 )
__lowerCAmelCase = 2
__lowerCAmelCase = ds.make_sortish_sampler(__a , shuffle=__a )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 )
__lowerCAmelCase = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a )
__lowerCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(__a , __a="input_ids" ):
return [batch[k].eq(__a ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__a , k="labels" ) ) < sum(count_pad_tokens(__a , k="labels" ) )
assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) )
assert len(__a ) == len(__a )
def snake_case ( self , __a=10_00 , __a=1_28 ):
if os.getenv("USE_REAL_DATA" , __a ):
__lowerCAmelCase = "examples/seq2seq/wmt_en_ro"
__lowerCAmelCase = max_len * 2 * 64
if not Path(__a ).joinpath("train.len" ).exists():
save_len_file(__a , __a )
else:
__lowerCAmelCase = "examples/seq2seq/test_data/wmt_en_ro"
__lowerCAmelCase = max_len * 4
save_len_file(__a , __a )
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a )
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path="train" , max_source_length=__a , max_target_length=__a , n_obs=__a , )
return ds, max_tokens, tokenizer
def snake_case ( self ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._get_dataset()
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=__a ) )
__lowerCAmelCase = set(DistributedSortishSampler(__a , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=__a ) )
assert idsa.intersection(__a ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def snake_case ( self , __a ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(__a , use_fast=__a )
if tok_name == MBART_TINY:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowerCAmelCase = SeqaSeqDataset(
__a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , )
__lowerCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
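# Added sketch (not part of the test file): the sortish sampler asserted on above reduces
# padding by shuffling examples in large chunks and then sorting each chunk by length, so
# batches hold similar-length sequences. The core index trick, in isolation:
def sortish_indices(lengths, batch_size, chunk_mult=50):
    import random

    indices = list(range(len(lengths)))
    random.shuffle(indices)
    chunk = batch_size * chunk_mult
    out = []
    for start in range(0, len(indices), chunk):
        block = indices[start : start + chunk]
        out.extend(sorted(block, key=lambda i: lengths[i], reverse=True))
    return out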
| 259 | 0 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """simple docstring"""
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
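    # Added check (illustrative): `peak` returns the turning-point value of a list that
    # strictly increases and then strictly decreases.
    assert peak([1, 3, 7, 9, 4, 2]) == 9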
| 15 |
"""simple docstring"""
from __future__ import annotations
import math
class SegmentTree:
    """simple docstring"""

    def __init__(self, size):
        """simple docstring"""
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx):
        """simple docstring"""
        return idx * 2

    def right(self, idx):
        """simple docstring"""
        return idx * 2 + 1

    def build(self, idx, left_element, right_element, a):
        """simple docstring"""
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx, left_element, right_element, a, b, val):
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx, left_element, right_element, a, b):
        """simple docstring"""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self):
        """simple docstring"""
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
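    # Added check (illustrative): the lazy values written by the range assignment above
    # must be pushed down before any overlapping query, so a point query sees the update.
    assert segt.query(1, 1, size, 7, 7) == 235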
| 108 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase : Any = logging.get_logger()
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
lowercase : nn.Module
lowercase : List[nn.Module] = field(default_factory=lowerCamelCase__ )
lowercase : list = field(default_factory=lowerCamelCase__ )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
'''simple docstring'''
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
if has_not_submodules:
self.traced.append(__UpperCamelCase )
def __call__( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(__UpperCamelCase )
[x.remove() for x in self.handles]
return self
@property
def __lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
return list(filter(lambda __UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
lowercase : nn.Module
lowercase : nn.Module
lowercase : int = 0
lowercase : List = field(default_factory=lowerCamelCase__ )
lowercase : List = field(default_factory=lowerCamelCase__ )
def __call__( self , __UpperCamelCase ) -> List[str]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = Tracker(self.dest )(__UpperCamelCase ).parametrized
__UpperCamelCase : Union[str, Any] = Tracker(self.src )(__UpperCamelCase ).parametrized
__UpperCamelCase : Union[str, Any] = list(filter(lambda __UpperCamelCase : type(__UpperCamelCase ) not in self.src_skip , __UpperCamelCase ) )
__UpperCamelCase : Any = list(filter(lambda __UpperCamelCase : type(__UpperCamelCase ) not in self.dest_skip , __UpperCamelCase ) )
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(__UpperCamelCase )} operations while'''
f''' destination module has {len(__UpperCamelCase )}.''' )
for dest_m, src_m in zip(__UpperCamelCase , __UpperCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
def UpperCAmelCase_ (_lowerCAmelCase : str , _lowerCAmelCase : ResNetConfig , _lowerCAmelCase : Path , _lowerCAmelCase : bool = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
__UpperCamelCase : Optional[Any] = timm.create_model(_lowerCAmelCase , pretrained=_lowerCAmelCase ).eval()
__UpperCamelCase : Union[str, Any] = ResNetForImageClassification(_lowerCAmelCase ).eval()
__UpperCamelCase : Any = ModuleTransfer(src=_lowerCAmelCase , dest=_lowerCAmelCase )
__UpperCamelCase : Optional[int] = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(_lowerCAmelCase )
assert torch.allclose(from_model(_lowerCAmelCase ) , our_model(_lowerCAmelCase ).logits ), "The model logits don't match the original one."
__UpperCamelCase : Tuple = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(_lowerCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=_lowerCAmelCase , )
# we can use the convnext one
__UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=_lowerCAmelCase , )
print(F'''Pushed {checkpoint_name}''' )
def UpperCAmelCase_ (_lowerCAmelCase : Path , _lowerCAmelCase : str = None , _lowerCAmelCase : bool = True ):
__UpperCamelCase : str = "imagenet-1k-id2label.json"
__UpperCamelCase : Dict = 10_00
__UpperCamelCase : Any = (1, num_labels)
__UpperCamelCase : Union[str, Any] = "huggingface/label-files"
__UpperCamelCase : List[Any] = num_labels
__UpperCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) )
__UpperCamelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
__UpperCamelCase : List[str] = idalabel
__UpperCamelCase : str = {v: k for k, v in idalabel.items()}
__UpperCamelCase : Dict = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase )
__UpperCamelCase : List[str] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(_lowerCAmelCase , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
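# Added sketch (not part of the conversion script): the Tracker above records leaf modules
# by registering forward hooks and running one dummy batch. The bare pattern:
def trace_leaf_modules(model: nn.Module, example: Tensor) -> list:
    traced = []
    hooks = [
        m.register_forward_hook(lambda module, inputs, output: traced.append(module))
        for m in model.modules()
        if len(list(m.children())) == 0  # leaf modules only
    ]
    model(example)  # a single forward pass fires every hook in execution order
    for hook in hooks:
        hook.remove()
    return traced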
| 171 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 171 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        'HTSAT-tiny', 'roberta', checkpoint_path, precision='fp32', device='cuda:0' if torch.cuda.is_available() else 'cpu', enable_fusion=enable_fusion, fusion_type='aff_2d' if enable_fusion else None, )
    return model, model_cfg
def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
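# Added sketch (not part of the conversion script): the qkv handling above splits one fused
# in-projection tensor into equal query/key/value thirds along dim 0. In isolation:
def split_fused_qkv(mixed_qkv):
    qkv_dim = mixed_qkv.size(0) // 3
    query = mixed_qkv[:qkv_dim]
    key = mixed_qkv[qkv_dim : qkv_dim * 2]
    value = mixed_qkv[qkv_dim * 2 :]
    return query, key, value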
| 86 |
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution():
    '''simple docstring'''
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, 'words.txt')
    words = ''
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip('\r\n').split(',')]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 29 | 0 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    '''simple docstring'''

    def __init__(self, img, dst_width: int, dst_height: int) -> None:
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self) -> None:
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """simple docstring"""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread('image_data/lena.jpg', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
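# Added sketch (not in the original file): the same nearest-neighbour mapping can be done
# without Python loops by indexing with precomputed integer coordinate arrays.
def nearest_neighbour_fast(img, dst_w: int, dst_h: int):
    src_h, src_w = img.shape[:2]
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    return img[ys[:, None], xs[None, :]]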
| 362 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[int] , __UpperCAmelCase: Optional[Any]=False ) -> List[Any]:
UpperCamelCase__ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCamelCase__ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def lowerCAmelCase_ ( __UpperCAmelCase: Dict , __UpperCAmelCase: Any , __UpperCAmelCase: Dict=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
UpperCamelCase__ : Tuple = ''''''
else:
UpperCamelCase__ : Optional[int] = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Dict = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
UpperCamelCase__ : Union[str, Any] = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ : int = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] ) -> Optional[Any]:
UpperCamelCase__ : int = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] , __UpperCAmelCase: str , __UpperCAmelCase: Tuple ) -> Dict:
UpperCamelCase__ : List[str] = dct.pop(__UpperCAmelCase )
UpperCamelCase__ : int = val
def lowerCAmelCase_ ( ) -> Tuple:
UpperCamelCase__ : Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCamelCase__ : int = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( __UpperCAmelCase: List[str] , __UpperCAmelCase: Dict , __UpperCAmelCase: List[Any]=True ) -> Union[str, Any]:
UpperCamelCase__ : Optional[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
UpperCamelCase__ : List[str] = 8
# set labels if required
if not base_model:
UpperCamelCase__ : Union[str, Any] = 1000
UpperCamelCase__ : Optional[Any] = '''huggingface/label-files'''
UpperCamelCase__ : Dict = '''imagenet-1k-id2label.json'''
UpperCamelCase__ : str = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ : Dict = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ : str = idalabel
UpperCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
UpperCamelCase__ : str = 384
UpperCamelCase__ : str = 1536
UpperCamelCase__ : Tuple = 12
UpperCamelCase__ : Optional[int] = 6
# load original model from torch hub
UpperCamelCase__ : Any = torch.hub.load('''facebookresearch/dino:main''' , __UpperCAmelCase )
original_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ : str = original_model.state_dict()
if base_model:
remove_classification_head_(__UpperCAmelCase )
UpperCamelCase__ : int = create_rename_keys(__UpperCAmelCase , base_model=__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# load HuggingFace model
if base_model:
UpperCamelCase__ : int = ViTModel(__UpperCAmelCase , add_pooling_layer=__UpperCAmelCase ).eval()
else:
UpperCamelCase__ : Optional[int] = ViTForImageClassification(__UpperCAmelCase ).eval()
model.load_state_dict(__UpperCAmelCase )
# Check outputs on an image, prepared by ViTImageProcessor
UpperCamelCase__ : Dict = ViTImageProcessor()
UpperCamelCase__ : List[Any] = image_processor(images=prepare_img() , return_tensors='''pt''' )
UpperCamelCase__ : Optional[Any] = encoding['''pixel_values''']
UpperCamelCase__ : Optional[Any] = model(__UpperCAmelCase )
if base_model:
UpperCamelCase__ : Union[str, Any] = original_model(__UpperCAmelCase )
assert torch.allclose(__UpperCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
UpperCamelCase__ : Any = original_model(__UpperCAmelCase )
assert logits.shape == outputs.logits.shape
assert torch.allclose(__UpperCAmelCase , outputs.logits , atol=1e-3 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 247 | 0 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = ['''image_processor''', '''tokenizer''']
snake_case = '''OwlViTImageProcessor'''
snake_case = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : List[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , **__UpperCAmelCase : Tuple ):
'''simple docstring'''
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
_A = kwargs.pop("feature_extractor" )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self : Optional[Any] , __UpperCAmelCase : Any=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Any="max_length" , __UpperCAmelCase : List[Any]="np" , **__UpperCAmelCase : str ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__UpperCAmelCase , __UpperCAmelCase ) or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and not isinstance(text[0] , __UpperCAmelCase )):
_A = [self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )]
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and isinstance(text[0] , __UpperCAmelCase ):
_A = []
# Maximum number of queries across batch
_A = max([len(__UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCAmelCase ) != max_num_queries:
_A = t + [" "] * (max_num_queries - len(__UpperCAmelCase ))
_A = self.tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
encodings.append(__UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_A = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_A = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_A = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_A = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_A = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_A = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_A = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_A = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_A = BatchEncoding()
_A = input_ids
_A = attention_mask
if query_images is not None:
_A = BatchEncoding()
_A = self.image_processor(
__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ).pixel_values
_A = query_pixel_values
if images is not None:
_A = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : Dict , **__UpperCAmelCase : Dict ):
'''simple docstring'''
return self.image_processor.post_process(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , *__UpperCAmelCase : int , **__UpperCAmelCase : int ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Dict , *__UpperCAmelCase : str , **__UpperCAmelCase : Optional[Any] ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : str , *__UpperCAmelCase : Any , **__UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , *__UpperCAmelCase : Any , **__UpperCAmelCase : int ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase ( self : int ):
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __UpperCAmelCase , )
return self.image_processor
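# Added sketch (not part of the processor): the text branch above pads every sample's list
# of queries to the batch-wide maximum before tokenizing. The padding step in isolation:
def pad_query_lists(batched_queries):
    max_num_queries = max(len(queries) for queries in batched_queries)
    return [queries + [" "] * (max_num_queries - len(queries)) for queries in batched_queries]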
| 79 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect rows partition by partition so they can be compared against the iterable order."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 230 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
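# The integration tests below replicate pipeline parameters across every local device
# and shard the inputs accordingly, one sample per device.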
@slow
@require_flax
class FlaxStableDiffusionControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self) -> None:
        # Reclaim device memory between tests.
        super().tearDown()
        gc.collect()
    def test_canny(self) -> None:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        # Replicate the params and shard the inputs so each device runs one sample.
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self) -> None:
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 367 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
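# The SentencePiece fixture below deliberately lacks a BOS token, matching Pegasus' vocabulary layout.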
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self) -> None:
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self) -> None:
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self) -> None:
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_seq2seq_batch(self) -> None:
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self) -> None:
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self) -> None:
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self) -> None:
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 232 | 0 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
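# PIL is optional at import time; when vision deps are missing, a no-op stub stands in for Image.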
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )
        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples
    def run_pipeline_test(self, object_detector, examples):
        # threshold=0.0 keeps every candidate box so we can check the output schema.
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
] , )
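        # The pipeline also accepts a batch of dicts and returns one list of detections per input.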
        outputs = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.7_235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.7_184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}},
{"score": 0.6_748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}},
{"score": 0.6_456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}},
{"score": 0.6_419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}},
]
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
] , )
        outputs = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
[
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
{"score": 0.1_474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}},
{"score": 0.1_208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
    def test_large_model_tf(self):
        pass
@require_torch
@slow
    def test_threshold(self):
        # Raising the threshold prunes detections whose confidence falls below it.
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
{"score": 0.2_537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}},
] , )
@require_torch
@slow
    def test_top_k(self):
        # top_k caps the number of returned detections, keeping only the highest scores.
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{"score": 0.2_868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}},
] , )
| 133 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        # Prepend a new node holding ``new_data``.
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        # Swap the payloads only; the links stay untouched.
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 133 | 1 |
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling=True, num_time_features=0, num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, encoder_ffn_dim=32, decoder_ffn_dim=32, activation_function="gelu", dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, is_encoder_decoder=True, label_length=10, moving_average=25, autocorrelation_factor=3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
    def _number_of_features(self) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 364 |
'''simple docstring'''
import math
def fx(x: float, a: float) -> float:
    """The function whose positive root is the square root of ``a``."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point above the true root by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with the Newton-Raphson iteration."""
    if a < 0:
        raise ValueError('math domain error')

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 246 | 0 |
'''simple docstring'''
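# Maximum-flow computation on a capacity matrix using the push-relabel algorithm
# with the relabel-to-front selection rule.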
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # Subclasses override this with the actual algorithm.
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 80 |
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a minimax game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 80 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
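# LayoutLMv3 couples an image processor (which can optionally run OCR) with a word-level tokenizer.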
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of an overflow, map each `input_ids` sample back to its source image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 14 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
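# OPT checkpoints reuse GPT-2's byte-level BPE tokenizer.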
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
'''simple docstring'''
__a: Tuple = OPTConfig
__a: Optional[Any] = {}
__a: Tuple = '''gelu'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , )
lowerCAmelCase_ = prepare_opt_inputs_dict(lowercase_ , lowercase_ )
return config, inputs_dict
def _lowercase ( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel(config=lowercase_ )
lowerCAmelCase_ = inputs_dict['input_ids']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['attention_mask'][:1, :]
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
lowerCAmelCase_ , lowerCAmelCase_ = outputs.to_tuple()
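        # Decoding with the cached past must reproduce the logits of a full forward pass.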
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ )[0]
lowerCAmelCase_ = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
lowerCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
__a: Optional[int] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__a: Optional[Any] = (TFOPTForCausalLM,) if is_tf_available() else ()
__a: Union[str, Any] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
__a: int = False
__a: List[Any] = False
__a: Dict = False
__a: List[Any] = 1_0
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=lowercase_ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
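        # Resizing the token embeddings must keep the weights of the surviving tokens intact.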
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        def _get_word_embedding_weight(model, embedding_layer):
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowercase_ , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]:
# build the embeddings
lowerCAmelCase_ = model_class(config=lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase_ )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() )
lowerCAmelCase_ = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
lowerCAmelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase_ )
# check that weights remain the same after resizing
lowerCAmelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase_ )
lowerCAmelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
lowerCAmelCase_ = False
self.assertTrue(lowercase_ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = 9_9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
lowerCAmelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
lowerCAmelCase_ = input_ids.shape[0]
lowerCAmelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTModel.from_pretrained('facebook/opt-350m' )
lowerCAmelCase_ = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ = tf.not_equal(lowercase_ , model.config.pad_token_id )
with tf.GradientTape():
lowerCAmelCase_ = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state
lowerCAmelCase_ = (1, 1_1, 5_1_2)
self.assertEqual(output.shape , lowercase_ )
lowerCAmelCase_ = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-3 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = xla_generate(lowercase_ , lowercase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4e-2 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
lowerCAmelCase_ = 'facebook/opt-350m'
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ , add_special_tokens=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
lowerCAmelCase_ = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
lowerCAmelCase_ = tf.function(lowercase_ , jit_compile=lowercase_ )
lowerCAmelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1e-4 ) )
@require_tf
@slow
class a_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowercase ( self ) -> str:
'''simple docstring'''
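        # Greedy generation from each prompt should reproduce the reference continuations verbatim.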
lowerCAmelCase_ = 'facebook/opt-125m'
lowerCAmelCase_ = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
lowerCAmelCase_ = []
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
for prompt in self.prompts:
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(lowercase_ , max_length=1_0 )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
predicted_outputs += generated_string
self.assertListEqual(lowercase_ , lowercase_ )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = 'facebook/opt-350m'
lowerCAmelCase_ = GPTaTokenizer.from_pretrained(lowercase_ )
lowerCAmelCase_ = TFOPTForCausalLM.from_pretrained(lowercase_ )
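        # Decoder-only models are left-padded so generation continues directly after the prompt.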
        tokenizer.padding_side = "left"
# use different length sentences to test batching
lowerCAmelCase_ = [
'Hello, my dog is a little',
'Today, I',
]
lowerCAmelCase_ = tokenizer(lowercase_ , return_tensors='tf' , padding=lowercase_ )
lowerCAmelCase_ = inputs['input_ids']
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , attention_mask=inputs['attention_mask'] )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ )
lowerCAmelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
lowerCAmelCase_ = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings )
lowerCAmelCase_ = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowerCAmelCase_ = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm(self ):
        model_id = 'facebook/opt-350m'
        EXPECTED_OUTPUTS = [
            'Today is a beautiful day and I want to',
            'In the city of San Francisco, the city',
            'Paris is the capital of France and the capital',
            'Computers and mobile phones have taken over the',
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors='tf' ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
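    # Note on the batching test above: decoder-only models like OPT must be
    # padded on the left so the newly generated tokens directly continue the
    # prompt. A minimal sketch of the idea (names as used in this file):
    #
    #   tokenizer.padding_side = 'left'
    #   batch = tokenizer(['a long prompt', 'short'], return_tensors='tf', padding=True)
    #   model.generate(input_ids=batch['input_ids'], attention_mask=batch['attention_mask'])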
| 14 | 1 |
'''simple docstring'''
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str ) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )


def decrypt(message: str ) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split() )


def main() -> None:
    """simple docstring"""
    message = '''Morse code here!'''
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )


if __name__ == "__main__":
    main()
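# A quick self-checking round-trip, shown as a minimal sketch (the expected
# Morse string follows directly from the table above):
#
#   assert encrypt("SOS") == "... --- ..."
#   assert decrypt("... --- ...") == "SOS"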
| 331 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase ):
    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''的''',
            '''价''',
            '''格''',
            '''是''',
            '''15''',
            '''便''',
            '''alex''',
            '''##andra''',
            ''',''',
            '''。''',
            '''-''',
            '''t''',
            '''shirt''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 224, '''width''': 224},
            '''do_center_crop''': True,
            '''crop_size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
            '''do_convert_rgb''': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer(self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
    def test_save_load_pretrained_additional_features(self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=False )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='''np''' )
        input_processor = processor(images=image_input , return_tensors='''np''' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )

        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )

        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
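# Minimal usage sketch of the processor under test (the checkpoint name is a
# real published model but is only illustrative here; the local image path is
# hypothetical):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   batch = processor(text=["一只猫"], images=[Image.open("cat.png")], return_tensors="pt")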
| 318 | 0 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , scope=None , range_bbox=1000 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    def get_config(self ):
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox , token_type_ids=token_type_ids )
        result = model(input_ids , bbox=bbox )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_token_classification(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering(self , config , input_ids , bbox , token_type_ids , input_mask , sequence_labels , token_labels , ):
        model = LiltForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , bbox=bbox , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        return True
    def setUp(self ):
        self.model_tester = LiltModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LiltConfig , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_token_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    def test_for_question_answering(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self ):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase ):
    def test_inference_no_head(self ):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(torch_device )

        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )

        expected_shape = torch.Size([1, 2, 768] )
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=torch_device , )

        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 352 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline(Pipeline ):
    def __init__(self , **kwargs ):
        super().__init__(**kwargs )

        requires_backends(self , "vision" )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__(self , images: Union[str, List[str], "Image", List["Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def _sanitize_parameters(self , **kwargs ):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self , image , candidate_labels=None , hypothesis_template="This is a photo of {}." ):
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self , model_inputs ):
        candidate_labels = model_inputs.pop("candidate_labels" )
        text_inputs = model_inputs.pop("text_inputs" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs , **model_inputs )

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self , model_outputs ):
        candidate_labels = model_outputs.pop("candidate_labels" )
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}' )

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x: -x[0] )
        ]
        return result
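# Minimal usage sketch for this pipeline (assumes network access; the
# checkpoint below is a real CLIP model but is only illustrative here, as is
# the local image path):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification",
#                         model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"],
#              hypothesis_template="This is a photo of {}.")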
| 196 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
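# What the lazy pattern above buys the caller, as a short sketch: importing the
# package stays cheap, and each backend-specific submodule is only imported on
# first attribute access, e.g.
#
#   from transformers.models.convnext import ConvNextConfig   # config import only
#   from transformers.models.convnext import ConvNextModel    # now pulls in torch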
| 11 |
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1000000 ) -> int:
    '''simple docstring'''
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                # count cuboids (a, b, c) with a <= b <= c = max_cuboid_size and
                # a + b == sum_shortest_sides: a runs from max(1, a+b-c) to (a+b)//2
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'{solution() = }')
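# Sanity check, based on the values quoted in Project Euler problem 86
# (M = 99 yields 1975 cuboids, M = 100 yields 2060); shown as a hedged
# example rather than part of the original file:
#
#   assert solution(1974) == 99
#   assert solution(1975) == 100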
| 272 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """simple docstring"""
    parser = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers' )

    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )

    # Let's go
    args = parser.parse_args()

    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )

    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
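# Example invocations once installed (both subcommands are registered above):
#
#   transformers-cli env
#   transformers-cli download bert-base-uncased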
| 351 |
'''simple docstring'''
def factorial(num: int ) -> int:
    """simple docstring"""
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def split_and_add(number: int ) -> int:
    """simple docstring"""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100 ) -> int:
    """simple docstring"""
    nfact = factorial(num )
    result = split_and_add(nfact )
    return result
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
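# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so:
#
#   assert solution(10) == 27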
| 275 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ : List[Any] = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url ):
    # NOTE: the DPTConfig attribute names below are reconstructed from the
    # surrounding values and the standard DPT config; treat them as assumptions.
    config = DPTConfig(embedding_type='''hybrid''' )

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = '''project'''

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='''dataset''' ) ) , '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict ):
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(name ):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('''pretrained.model''' , '''dpt.encoder''' )
    if "pretrained.model" in name:
        name = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
    if "patch_embed" in name:
        name = name.replace('''patch_embed''' , '''''' )
    if "pos_embed" in name:
        name = name.replace('''pos_embed''' , '''position_embeddings''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "proj" in name and "project" not in name:
        name = name.replace('''proj''' , '''projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layer''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "norm1" in name and "backbone" not in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name and "backbone" not in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "scratch.output_conv" in name:
        name = name.replace('''scratch.output_conv''' , '''head''' )
    if "scratch" in name:
        name = name.replace('''scratch''' , '''neck''' )
    if "layer1_rn" in name:
        name = name.replace('''layer1_rn''' , '''convs.0''' )
    if "layer2_rn" in name:
        name = name.replace('''layer2_rn''' , '''convs.1''' )
    if "layer3_rn" in name:
        name = name.replace('''layer3_rn''' , '''convs.2''' )
    if "layer4_rn" in name:
        name = name.replace('''layer4_rn''' , '''convs.3''' )
    if "refinenet" in name:
        layer_idx = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('''out_conv''' , '''projection''' )
    if "resConfUnit1" in name:
        name = name.replace('''resConfUnit1''' , '''residual_layer1''' )
    if "resConfUnit2" in name:
        name = name.replace('''resConfUnit2''' , '''residual_layer2''' )
    if "conv1" in name:
        name = name.replace('''conv1''' , '''convolution1''' )
    if "conv2" in name:
        name = name.replace('''conv2''' , '''convolution2''' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
    if "pretrained" in name:
        name = name.replace('''pretrained''' , '''dpt''' )
    if "bn" in name:
        name = name.replace('''bn''' , '''batch_norm''' )
    if "head" in name:
        name = name.replace('''head''' , '''head.head''' )
    if "encoder.norm" in name:
        name = name.replace('''encoder.norm''' , '''layernorm''' )
    if "auxlayer" in name:
        name = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
    if "backbone" in name:
        name = name.replace('''backbone''' , '''backbone.bit.encoder''' )
    if ".." in name:
        name = name.replace('''..''' , '''.''' )
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "convolution" in name and "backbone" in name:
        name = name.replace('''convolution''' , '''conv''' )
    if "layer" in name and "backbone" in name:
        name = name.replace('''layer''' , '''layers''' )
    if "backbone.bit.encoder.bit" in name:
        name = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
    if "embedder.conv" in name:
        name = name.replace('''embedder.conv''' , '''embedder.convolution''' )
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
    return name
def read_in_q_k_v(state_dict , config ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ):
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )

    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )

    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth

    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(f"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
A__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
A__ : Union[str, Any] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
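# Example invocation (paths are illustrative; note that despite the flag name,
# --checkpoint_url must point at a local file here, since the script passes it
# straight to torch.load):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid --show_prediction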
| 103 |
'''simple docstring'''
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
A__ : str = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
A__ : Dict = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
A__ : int = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
A__ : Tuple = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
A__ : Tuple = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def rename_state_dict_key(k , patterns ) -> str:
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name , hf_name )
    return k
def convert_bigbird_pegasus(tf_weights: dict , config_update: dict ) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update )
    torch_model = BigBirdPegasusForConditionalGeneration(cfg )
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}

    for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
        conditions = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
        if any(conditions ):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k , patterns )
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
        if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
            v = v.T
        mapping[new_k] = torch.from_numpy(v )
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'

    mapping['model.encoder.embed_positions.weight'] = mapping['model.embed_positions.weight']
    mapping['model.decoder.embed_positions.weight'] = mapping.pop('model.embed_positions.weight' )
    missing, extra = torch_model.load_state_dict(mapping , strict=False )
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            'final_logits_bias',
            'model.encoder.embed_tokens.weight',
            'model.decoder.embed_tokens.weight',
            'lm_head.weight',
        ]
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path ) -> Dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ['global_step']
    for name, shape in tqdm(init_vars , desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path , name )
        tf_weights[name] = array
    return tf_weights
def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str , save_dir: str , config_update: dict ) -> None:
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
A__ : Any = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
A__ : List[str] = parser.parse_args()
A__ : Optional[int] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
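# Example invocation (both paths are illustrative):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path ./bigbird-pegasus/model.ckpt --save_dir ./bbp-converted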
| 185 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase ):
    def __init__(self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self ):
        self.model_tester = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self ):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self ):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self ):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self ):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(model )

    def test_attention_outputs(self ):
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs(self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
        # the Flax block-sparse attention returns no attention probabilities,
        # so attention outputs cannot be compared against the PyTorch version
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 361 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self ):
        return DebertaVaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a ):
'''simple docstring'''
__a : Dict = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__a : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__a : str = model(__a , token_type_ids=__a )[0]
__a : Optional[int] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Replicate each example num_choices times so every choice is scored.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
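
# The tester above fabricates small random configs and inputs; the test case below
# plugs it into the shared ModelTesterMixin machinery so each model head gets a
# shape (and, where applicable, loss) check.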
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 294 | 0 |
"""Jump search on a sorted list: probe in sqrt(n)-sized blocks, then scan linearly."""
import math


def jump_search(arr, x):
    """Return the index of x in sorted arr, or -1 if x is not present."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead in blocks of size sqrt(n) until we pass the block that may contain x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan within the identified block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
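# Sanity check (hypothetical values, not part of the original script):
# jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 13) returns 4.
# Overall cost is O(sqrt(n)) comparisons on a sorted array.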
| 349 |
"""Tests for the TensorFlow Pegasus model."""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice: cached decoding must reproduce
        # the no-cache logits for the newly appended tokens
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
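
# When masks are omitted above, they are derived from config.pad_token_id, so the
# tester only needs to supply input_ids and decoder_input_ids.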
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
| 349 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices (recursion base case)."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple:
    """Split an even-dimensioned square matrix into four equal quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")
    matrix_length = len(a)
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))
def __lowercase ( _a , _a ):
if matrix_dimensions(_a ) == (2, 2):
return default_matrix_multiplication(_a , _a )
snake_case_, snake_case_, snake_case_, snake_case_ : Dict = split_matrix(_a )
snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = split_matrix(_a )
snake_case_ : List[Any] = actual_strassen(_a , matrix_subtraction(_a , _a ) )
snake_case_ : Optional[int] = actual_strassen(matrix_addition(_a , _a ) , _a )
snake_case_ : Tuple = actual_strassen(matrix_addition(_a , _a ) , _a )
snake_case_ : Union[str, Any] = actual_strassen(_a , matrix_subtraction(_a , _a ) )
snake_case_ : Optional[int] = actual_strassen(matrix_addition(_a , _a ) , matrix_addition(_a , _a ) )
snake_case_ : Optional[int] = actual_strassen(matrix_subtraction(_a , _a ) , matrix_addition(_a , _a ) )
snake_case_ : Optional[int] = actual_strassen(matrix_subtraction(_a , _a ) , matrix_addition(_a , _a ) )
snake_case_ : Any = matrix_addition(matrix_subtraction(matrix_addition(_a , _a ) , _a ) , _a )
snake_case_ : Optional[int] = matrix_addition(_a , _a )
snake_case_ : List[Any] = matrix_addition(_a , _a )
snake_case_ : Optional[int] = matrix_subtraction(matrix_subtraction(matrix_addition(_a , _a ) , _a ) , _a )
# construct the new matrix from our 4 quadrants
snake_case_ : Union[str, Any] = []
for i in range(len(_a ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(_a ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def __lowercase ( _a , _a ):
if matrix_dimensions(_a )[1] != matrix_dimensions(_a )[0]:
snake_case_ : Dict = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
f"Matrix A: {matrixa}\n"
f"Matrix B: {matrixa}"
)
raise Exception(_a )
snake_case_ : Optional[int] = matrix_dimensions(_a )
snake_case_ : List[str] = matrix_dimensions(_a )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
snake_case_ : str = max(*_a , *_a )
snake_case_ : List[Any] = int(math.pow(2 , math.ceil(math.loga(_a ) ) ) )
snake_case_ : Dict = matrixa
snake_case_ : Any = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , _a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _a ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
snake_case_ : str = actual_strassen(_a , _a )
# Removing the additional zeros
for i in range(0 , _a ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , _a ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
lowercase__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
lowercase__ : Optional[int] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
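# Strassen trades 8 recursive multiplications for 7, giving O(n^log2(7)) ~ O(n^2.81)
# scalar multiplications instead of O(n^3); non-power-of-two inputs are zero-padded
# up to the next power-of-two size and the padding is stripped from the result.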
| 155 |
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)


MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
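
# Example (assumed usage): callers can append custom fields via a dict, e.g.
# http_user_agent({"pipeline_class": "StableDiffusionPipeline"}) yields
# "...; pipeline_class/StableDiffusionPipeline" at the end of the string.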
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
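
# Example (hypothetical path): extract_commit_hash(".../snapshots/abc123/unet/config.json")
# returns "abc123" when it matches REGEX_COMMIT_HASH, otherwise None.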
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
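
# Example: _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin"; with variant=None the name is unchanged.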
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
'''this model name. Check the model page at '''
f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." )
except EntryNotFoundError:
raise EnvironmentError(
f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}." )
except HTTPError as err:
raise EnvironmentError(
f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" )
except ValueError:
raise EnvironmentError(
f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
f" directory containing a file named {weights_name} or"
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
f"containing a file named {weights_name}" )
| 155 | 1 |