from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )

class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
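

# The tester above only builds tiny random configs/inputs and runs shape checks; the
# unittest classes below plug it into the shared TFModelTesterMixin / PipelineTesterMixin
# machinery that drives the common model and pipeline tests.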


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
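

# ImageGPT does not normalize to a fixed mean/std like most vision models; it
# color-quantizes each pixel to its nearest cluster centroid, so the tiny "clusters"
# array above stands in for the full centroid table the released checkpoints ship with
# (512 RGB clusters for the openai/imagegpt-* models).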


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
import re
import string
import numpy as np
import datasets
_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel

@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]


def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
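

# Note on the default decoder mask above: the first decoder position (the decoder-start
# token, which equals pad_token_id for MBart) is always left visible via tf.ones, and
# only padding in the remaining positions is masked out; otherwise the start token would
# be masked as if it were padding.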


@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
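

# Note: masks are binarized with a 0.5 threshold after the 0-255 -> 0-1 rescale, and both
# helpers snap width/height *down* to a multiple of 8 (images) or 32 (masks) so the
# UNet's downsampling blocks divide the spatial dimensions cleanly.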


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the raw dataset dict into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
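

# XGBClassifier is used with its defaults here, which is fine for this demo; for a real
# model you would typically tune hyperparameters, e.g. (hypothetical values)
# XGBClassifier(n_estimators=200, max_depth=4, learning_rate=0.1).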


def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
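
    # Why in_channels=9 above: the inpainting UNet is fed the 4 noisy latent channels
    # concatenated with 4 channels of the VAE-encoded masked image plus 1 channel for
    # the downsampled mask, i.e. 4 + 4 + 1 = 9 (vs. 4 for plain text-to-image).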

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Dict = 1 , lowerCAmelCase__ : Any = 1000 ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Union[str, Any] = 0
for divide_by_number in range(__lowerCAmelCase , digit + 1 ):
lowerCAmelCase_ : list[int] = []
lowerCAmelCase_ : Dict = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowerCAmelCase ):
lowerCAmelCase_ : str = len(__lowerCAmelCase )
lowerCAmelCase_ : Tuple = divide_by_number
else:
has_been_divided.append(__lowerCAmelCase )
lowerCAmelCase_ : int = now_divide * 10 % divide_by_number
return the_digit
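

# Worked example: for digit = 10 the longest cycle belongs to 1/7 = 0.(142857), whose
# remainders 1, 3, 2, 6, 4, 5 repeat with period 6, so solution(1, 10) == 7.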


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)

logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
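
    # The dummy data mimics the seq2seq layout finetune_rag expects: one example per
    # line, with line i of <split>.source paired with line i of <split>.target.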

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode characters. Whitespace and
    control bytes are remapped to code points above U+0100 so that every byte gets a
    visible symbol the BPE vocabulary can work with.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
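

# For example, the space byte (0x20) is outside the printable ranges above: it is the
# 33rd remapped byte, so it maps to chr(256 + 32) = "Ġ" -- the marker you see prefixing
# word-initial tokens in GPT-2/BART/LED vocabularies.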


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
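

# e.g. get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}
# -- these candidate pairs are what the BPE loop below ranks against the merge table.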


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
def a (self : Optional[Any] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def a (self : Any , text : Any , is_split_into_words : bool=False , **kwargs : Any ):
        """simple docstring"""
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
    def a (self : List[str] , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        """simple docstring"""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask'''] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
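# Illustrative standalone sketch (not part of the tokenizer above): how a single
# BPE merge pass rewrites a word for one (first, second) pair, with readable
# names. The helper name and toy inputs are hypothetical.
def _bpe_merge_once(word: tuple, first: str, second: str) -> tuple:
    new_word = []
    i = 0
    while i < len(word):
        try:
            j = word.index(first, i)  # next occurrence of the pair's first symbol
        except ValueError:
            new_word.extend(word[i:])
            break
        new_word.extend(word[i:j])
        i = j
        if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
            new_word.append(first + second)  # merge the pair into one symbol
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return tuple(new_word)

# e.g. _bpe_merge_once(("l", "o", "w"), "l", "o") -> ("lo", "w")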
| 357 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def a (self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
def a (self : Dict ):
"""simple docstring"""
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''bird'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        canny_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def a (self : Dict ):
"""simple docstring"""
        controlnet , controlnet_params = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''' , from_pt=True , dtype=jnp.bfloat16 )
        pipe , params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , controlnet=controlnet , from_pt=True , dtype=jnp.bfloat16 )
        params['''controlnet'''] = controlnet_params
        prompts = '''Chef in the kitchen'''
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples )
        pose_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples )
        rng = jax.random.PRNGKey(0 )
        rng = jax.random.split(rng , jax.device_count() )
        p_params = replicate(params )
        prompt_ids = shard(prompt_ids )
        processed_image = shard(processed_image )
        images = pipe(
            prompt_ids=prompt_ids , image=processed_image , params=p_params , prng_seed=rng , num_inference_steps=50 , jit=True , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
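# Minimal sketch (an assumption, not taken from the tests above) of the
# replicate/shard data-parallel pattern they rely on: weights are copied to every
# device, inputs are split along the leading axis, and a pmapped function runs
# once per device. The function and parameter names are hypothetical.
def _data_parallel_sketch():
    params = {"scale": jnp.float32(2.0 )}
    inputs = jnp.ones((jax.device_count() * 2, 3) )
    p_params = replicate(params )  # adds a leading device axis to every leaf
    sharded = shard(inputs )       # (num_devices, batch // num_devices, ...)
    p_fn = jax.pmap(lambda pr , x : x * pr["scale"] )
    return p_fn(p_params , sharded )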
| 238 | 0 |
'''simple docstring'''
def binary_recursive( __SCREAMING_SNAKE_CASE : int ):
    """simple docstring"""
    decimal : Optional[int] = int(__SCREAMING_SNAKE_CASE )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
def snake_case_ ( __SCREAMING_SNAKE_CASE : str ):
    """simple docstring"""
    number : str = str(__SCREAMING_SNAKE_CASE ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative : Optional[int] = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return F'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
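# Illustrative cross-check (not in the original module): the string converter
# above should agree with Python's built-in bin() for signed integer inputs.
def _check_against_builtin(value: int) -> bool:
    return snake_case_(str(value ) ) == bin(value )

# e.g. _check_against_builtin(-7) is True, since both sides give "-0b111"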
| 93 |
"""simple docstring"""
from __future__ import annotations
import math
_lowercase = '''2020.9.26'''
_lowercase = '''xcodz-dot, cclaus, dhruvmanila'''
def convert_to_ad ( x : float , y : float , z : float , scale : float , distance : float ):
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = F'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x : float , y : float , z : float , axis : str , angle : float ):
    if not isinstance(axis , str ):
        raise TypeError('Axis must be a str' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            'Input values except axis must either be float or int: '
            F'{list(input_variables.values() )}'
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('not a valid axis, choose one of \'x\', \'y\', \'z\'' )
    return new_x, new_y, new_z
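# Illustrative property check (an assumption, not from the original file): a
# rotation about one axis should preserve the vector norm in the perpendicular
# plane, whatever the (deliberately quirky) angle rescaling above produces.
def _norm_preserved(x: float, y: float, z: float, angle: float) -> bool:
    new_x , new_y , new_z = rotate(x , y , z , "z" , angle )
    return math.isclose(math.hypot(new_x , new_y ) , math.hypot(x , y ) ) and new_z == z

# e.g. _norm_preserved(1.0, 2.0, 3.0, 90.0) -> True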
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""") | 74 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
    def __init__( self , transformer , vae , scheduler , idalabel = None , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                for label in value.split("," ):
                    self.labels[label.lstrip()] = int(key )
        self.labels = dict(sorted(self.labels.items() ) )
def UpperCAmelCase_ ( self , snake_case__ ):
'''simple docstring'''
if not isinstance(snake_case__ , snake_case__ ):
lowercase__ : List[Any]= list(snake_case__ )
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , snake_case__ , snake_case__ = 4.0 , snake_case__ = None , snake_case__ = 50 , snake_case__ = "pil" , snake_case__ = True , ):
'''simple docstring'''
lowercase__ : List[Any]= len(snake_case__ )
lowercase__ : Optional[int]= self.transformer.config.sample_size
lowercase__ : List[str]= self.transformer.config.in_channels
lowercase__ : Any= randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=snake_case__ , device=self.device , dtype=self.transformer.dtype , )
lowercase__ : Any= torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ : Tuple= torch.tensor(snake_case__ , device=self.device ).reshape(-1 )
lowercase__ : Any= torch.tensor([1000] * batch_size , device=self.device )
lowercase__ : Tuple= torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(snake_case__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ : List[str]= latent_model_input[: len(snake_case__ ) // 2]
lowercase__ : int= torch.cat([half, half] , dim=0 )
lowercase__ : Union[str, Any]= self.scheduler.scale_model_input(snake_case__ , snake_case__ )
lowercase__ : Optional[int]= t
if not torch.is_tensor(snake_case__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ : List[str]= latent_model_input.device.type == "mps"
if isinstance(snake_case__ , snake_case__ ):
lowercase__ : int= torch.floataa if is_mps else torch.floataa
else:
lowercase__ : Dict= torch.intaa if is_mps else torch.intaa
lowercase__ : Tuple= torch.tensor([timesteps] , dtype=snake_case__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ : Dict= timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ : int= timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ : Union[str, Any]= self.transformer(
snake_case__ , timestep=snake_case__ , class_labels=snake_case__ ).sample
# perform guidance
if guidance_scale > 1:
lowercase__, lowercase__ : Tuple= noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__, lowercase__ : Union[str, Any]= torch.split(snake_case__ , len(snake_case__ ) // 2 , dim=0 )
lowercase__ : str= uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ : Dict= torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ : Optional[int]= torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__, lowercase__ : Union[str, Any]= torch.split(snake_case__ , snake_case__ , dim=1 )
else:
lowercase__ : int= noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ : List[Any]= self.scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
if guidance_scale > 1:
lowercase__, lowercase__ : Any= latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ : str= latent_model_input
lowercase__ : Dict= 1 / self.vae.config.scaling_factor * latents
lowercase__ : Any= self.vae.decode(snake_case__ ).sample
lowercase__ : Tuple= (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ : List[Any]= samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : Optional[Any]= self.numpy_to_pil(snake_case__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=snake_case__ )
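# Minimal sketch (illustrative, with a hypothetical helper name) of the
# classifier-free guidance blend performed inside the loop above: the
# unconditional and conditional noise predictions are combined with the scale.
def _cfg_blend(uncond_eps: torch.Tensor, cond_eps: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    return uncond_eps + guidance_scale * (cond_eps - uncond_eps)

# with guidance_scale == 1.0 this returns cond_eps unchanged; larger scales push
# the prediction further toward the conditional direction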
| 150 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : List[str] = logging.get_logger(__name__)
a : List[Any] = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
a : Dict = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path ) ->Optional[int]:
    """simple docstring"""
    sd = torch.load(checkpoint_path , map_location="cpu" )
    return sd
def get_new_dict(d , config , rename_keys_prefix=rename_keys_prefix ) ->List[str]:
    """simple docstring"""
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path , pytorch_dump_folder_path ) ->str:
    """simple docstring"""
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params )
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
a : Dict = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
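# Illustrative standalone example (not part of the conversion script): how the
# (old, new) substring pairs in `rename_keys_prefix` rewrite checkpoint keys.
# The helper name is hypothetical.
def _rename_state_dict_keys(sd , pairs ):
    out = OrderedDict()
    for key, value in sd.items():
        new_key = key
        for old, new in pairs:
            new_key = new_key.replace(old , new )
        out[new_key] = value
    return out

# e.g. _rename_state_dict_keys(OrderedDict({"bert.bert.embeddings.w": 0}), [("bert.bert", "visual_bert")])
# -> OrderedDict([("visual_bert.embeddings.w", 0)])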
| 150 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w(h , w , scale_factor=8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
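# Worked example (illustrative, not from the original file): get_new_h_w rounds
# each side up to a whole number of scale_factor**2 blocks, then multiplies back
# by scale_factor to obtain the size the movq latents are generated at.
def _sizing_example():
    assert get_new_h_w(512 , 512 ) == (64, 64)  # 512 is an exact multiple of 8**2
    assert get_new_h_w(520 , 520 ) == (72, 72)  # 520 // 64 == 8 with remainder, so round up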
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
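# Minimal sketch (an assumption, mirroring the pattern the tests below exercise):
# the same checkpoint can cross frameworks via the `from_pt` / `from_tf` flags,
# provided both TF and PT weights exist for the repo. The helper name is
# hypothetical and both framework imports above are assumed available.
def _cross_framework_sketch(model_name: str):
    tf_model = TFAutoModel.from_pretrained(model_name , from_pt=True )
    pt_model = AutoModel.from_pretrained(model_name , from_tf=True )
    return tf_model, pt_model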
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class lowercase ( snake_case__):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_= pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
with self.assertRaises(__UpperCAmelCase ):
UpperCAmelCase_= pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_= pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase_= pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
UpperCAmelCase_= pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_= pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
UpperCAmelCase_= pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
UpperCAmelCase_= pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_= pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
UpperCAmelCase_= pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
import PIL.Image
UpperCAmelCase_= PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCAmelCase ) as mock_cast_to_python_objects:
UpperCAmelCase_= pa.array(TypedSequence([{"""path""": None, """bytes""": B"""image_bytes"""}, pil_image] , type=Image() ) )
            UpperCAmelCase_, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("""optimize_list_casting""" , kwargs )
            self.assertFalse(kwargs["""optimize_list_casting"""] )
def __a ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : int ) -> str:
'''simple docstring'''
UpperCAmelCase_= pa.BufferReader(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ ,pa.Buffer ) else pa.memory_map(lowerCAmelCase_ )
UpperCAmelCase_= pa.ipc.open_stream(lowerCAmelCase_ )
UpperCAmelCase_= f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __a ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Tuple ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
UpperCAmelCase_= pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ ,schema=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase_= {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __a ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
UpperCAmelCase_= Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=lowerCAmelCase_ ,features=lowerCAmelCase_ ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
UpperCAmelCase_= pa.BufferReader(output.getvalue() )
UpperCAmelCase_= pa.ipc.open_stream(lowerCAmelCase_ )
UpperCAmelCase_= f.read_all()
UpperCAmelCase_= pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(lowerCAmelCase_ )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 10] )
def __a ( lowerCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ,hash_salt="""split_name""" ,check_duplicates=lowerCAmelCase_ ,) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=[1, 2] )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 2, 10] )
def __a ( lowerCAmelCase_ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ,hash_salt="""split_name""" ,check_duplicates=lowerCAmelCase_ ,) as writer:
with pytest.raises(lowerCAmelCase_ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=10 )
writer.write({"""col_1""": """bar""", """col_2""": 2} ,key=10 )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 2, 10] )
def __a ( lowerCAmelCase_ : Dict ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
with ArrowWriter(
stream=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ,hash_salt="""split_name""" ,check_duplicates=lowerCAmelCase_ ,) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} ,key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} ,key=2 )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : Dict ) -> Any:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
UpperCAmelCase_= pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ ,schema=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase_= {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __a ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
UpperCAmelCase_= pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ ,schema=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase_= {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" ,[None, 1, 10] )
@pytest.mark.parametrize(
"""fields""" ,[None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def __a ( lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
UpperCAmelCase_= pa.schema(lowerCAmelCase_ ) if fields else None
with ArrowWriter(stream=lowerCAmelCase_ ,schema=lowerCAmelCase_ ,writer_batch_size=lowerCAmelCase_ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
UpperCAmelCase_= {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowerCAmelCase_ ,metadata=writer._schema.metadata )
_check_output(output.getvalue() ,expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def __a ( ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_= {"""col_1""": pa.string(), """col_2""": pa.intaa()}
UpperCAmelCase_= os.path.join(lowerCAmelCase_ ,"""test.arrow""" )
with ArrowWriter(path=lowerCAmelCase_ ,schema=pa.schema(lowerCAmelCase_ ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(lowerCAmelCase_ ,metadata=writer._schema.metadata )
_check_output(lowerCAmelCase_ ,1 )
def __a ( lowerCAmelCase_ : Optional[Any] ) -> str:
'''simple docstring'''
if pa.types.is_list(lowerCAmelCase_ ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : int ) -> Optional[int]:
'''simple docstring'''
if isinstance(lst[0] ,lowerCAmelCase_ ):
change_first_primitive_element_in_list(lst[0] ,lowerCAmelCase_ )
else:
UpperCAmelCase_= value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" ,[(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : str ,lowerCAmelCase_ : Dict ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_= pa.array(TypedSequence(lowerCAmelCase_ ,optimized_int_type=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" ,[
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] ,)
@pytest.mark.parametrize("""sequence""" ,[[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def __a ( lowerCAmelCase_ : str ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_= pa.array(OptimizedTypedSequence(lowerCAmelCase_ ,col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
UpperCAmelCase_= copy.deepcopy(lowerCAmelCase_ )
UpperCAmelCase_= np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowerCAmelCase_ ,lowerCAmelCase_ )
UpperCAmelCase_= pa.array(OptimizedTypedSequence(lowerCAmelCase_ ,col=lowerCAmelCase_ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" ,[False, True] )
def __a ( lowerCAmelCase_ : int ,lowerCAmelCase_ : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_= str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=lowerCAmelCase_ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def __a ( lowerCAmelCase_ : Union[str, Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_= """mock://dataset-train.arrow"""
with ArrowWriter(path=lowerCAmelCase_ ,storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs ,type(lowerCAmelCase_ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowerCAmelCase_ )
def __a ( ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_= pa.BufferOutputStream()
with ParquetWriter(stream=lowerCAmelCase_ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
UpperCAmelCase_, UpperCAmelCase_= writer.finalize()
assert num_examples == 2
assert num_bytes > 0
UpperCAmelCase_= pa.BufferReader(output.getvalue() )
UpperCAmelCase_= pq.read_table(lowerCAmelCase_ )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" ,[False, True] )
def __a ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : List[str] ) -> Tuple:
'''simple docstring'''
import PIL.Image
UpperCAmelCase_= str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) ,dtype=np.uinta ) ).save(lowerCAmelCase_ ,format="""png""" )
UpperCAmelCase_= pa.BufferOutputStream()
with ParquetWriter(
stream=lowerCAmelCase_ ,features=Features({"""image""": Image()} ) ,embed_local_files=lowerCAmelCase_ ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
UpperCAmelCase_= pa.BufferReader(output.getvalue() )
UpperCAmelCase_= pq.read_table(lowerCAmelCase_ )
UpperCAmelCase_= pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] ,lowerCAmelCase_ )
with open(lowerCAmelCase_ ,"""rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def __a ( ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_= pa.schema([pa.field("""col_1""" ,pa.string() ,nullable=lowerCAmelCase_ )] )
UpperCAmelCase_= pa.BufferOutputStream()
with ArrowWriter(stream=lowerCAmelCase_ ) as writer:
writer._build_writer(inferred_schema=lowerCAmelCase_ )
assert writer._schema == pa.schema([pa.field("""col_1""" ,pa.string() )] )
| 277 |
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : pyspark.sql.DataFrame , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "arrow" , **__UpperCAmelCase : str , ) -> Dict:
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase_= load_from_cache_file
UpperCAmelCase_= file_format
UpperCAmelCase_= Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCAmelCase_= None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
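# Hedged usage sketch for the reader above. The public entry point in
# `datasets` is `Dataset.from_spark`; kept as comments because it needs a
# live Spark session.
#   from pyspark.sql import SparkSession
#   from datasets import Dataset
#   spark = SparkSession.builder.master("local[2]").getOrCreate()
#   df = spark.createDataFrame([("foo", 1), ("bar", 2)], ["col_1", "col_2"])
#   ds = Dataset.from_spark(df)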
| 277 | 1 |
def __UpperCamelCase ( _lowerCAmelCase ) -> str:
"""simple docstring"""
    if isinstance(_lowerCAmelCase , float ):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(_lowerCAmelCase , str ):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
if num == 0:
return "0b0"
A : Optional[int] = False
if num < 0:
A : Optional[int] = True
A : str = -num
A : list[int] = []
while num > 0:
binary.insert(0 , num % 2 )
num >>= 1
if negative:
return "-0b" + "".join(str(_lowerCAmelCase ) for e in binary )
return "0b" + "".join(str(_lowerCAmelCase ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 116 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
SCREAMING_SNAKE_CASE_:Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:int = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE_:Optional[Any] = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
SCREAMING_SNAKE_CASE_:int = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE_:int = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
SCREAMING_SNAKE_CASE_:Dict = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : List[str] = VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : str = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase : Tuple = FunnelTokenizer
__lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : int = 2
def __init__( self, lowerCamelCase__=None, lowerCamelCase__=None, lowerCamelCase__=True, lowerCamelCase__="<unk>", lowerCamelCase__="<sep>", lowerCamelCase__="<pad>", lowerCamelCase__="<cls>", lowerCamelCase__="<mask>", lowerCamelCase__="<s>", lowerCamelCase__="</s>", lowerCamelCase__=True, lowerCamelCase__=True, lowerCamelCase__=None, lowerCamelCase__="##", **lowerCamelCase__, ):
super().__init__(
lowerCamelCase__, tokenizer_file=lowerCamelCase__, do_lower_case=lowerCamelCase__, unk_token=lowerCamelCase__, sep_token=lowerCamelCase__, pad_token=lowerCamelCase__, cls_token=lowerCamelCase__, mask_token=lowerCamelCase__, bos_token=lowerCamelCase__, eos_token=lowerCamelCase__, clean_text=lowerCamelCase__, tokenize_chinese_chars=lowerCamelCase__, strip_accents=lowerCamelCase__, wordpieces_prefix=lowerCamelCase__, **lowerCamelCase__, )
A : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", lowerCamelCase__ ) != do_lower_case
or normalizer_state.get("""strip_accents""", lowerCamelCase__ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", lowerCamelCase__ ) != tokenize_chinese_chars
):
A : Optional[Any] = getattr(lowerCamelCase__, normalizer_state.pop("""type""" ) )
A : Union[str, Any] = do_lower_case
A : List[Any] = strip_accents
A : Optional[int] = tokenize_chinese_chars
A : Dict = normalizer_class(**lowerCamelCase__ )
A : List[Any] = do_lower_case
    def _lowerCAmelCase ( self, token_ids_a, token_ids_b=None ):
        A : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def _lowerCAmelCase ( self, token_ids_a, token_ids_b = None ):
        A : Any = [self.sep_token_id]
        A : Any = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__ = None ):
A : int = self._tokenizer.model.save(lowerCamelCase__, name=lowerCamelCase__ )
return tuple(lowerCamelCase__ )
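# Sketch of how the two helpers above lay out a sequence pair; Funnel gives
# [CLS] its own segment id (cls_token_type_id == 2), unlike BERT-style
# tokenizers where it shares segment 0:
#   tokens:   [CLS] a1 a2 [SEP] b1 b2 [SEP]
#   type ids:   2   0  0    0   1  1    1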
| 116 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCAmelCase_ ( unittest.TestCase):
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : str=13 , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=True , __UpperCamelCase : str=99 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : str=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=512 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[Any]=0.0_2 , __UpperCamelCase : List[Any]=4 , ) -> Optional[int]:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_attention_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_choices
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_attention_mask:
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self : List[Any] ) -> Any:
_UpperCamelCase = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = True
snake_case__ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCamelCase ( self : Optional[int] ) -> Dict:
_UpperCamelCase = FlaxRoFormerModelTester(self )
@slow
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''junnyu/roformer_chinese_small''' , from_pt=__UpperCamelCase )
_UpperCamelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(__UpperCamelCase )
@require_flax
class UpperCAmelCase_ ( unittest.TestCase):
@slow
def _UpperCamelCase ( self : Dict ) -> int:
_UpperCamelCase = FlaxRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
_UpperCamelCase = jnp.array([[0, 1, 2, 3, 4, 5]] )
_UpperCamelCase = model(__UpperCamelCase )[0]
_UpperCamelCase = 5_0000
_UpperCamelCase = (1, 6, vocab_size)
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCamelCase = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1E-4 ) )
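# The slice assertion above is the standard integration-test pattern in
# this codebase: compare a small corner of the output tensor (here
# output[:, :3, :3]) against hard-coded reference values with a loose
# tolerance (atol=1e-4) instead of storing the full 1x6x50000 logits.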
| 54 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''deta'''
snake_case__ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=900 , __UpperCamelCase : Dict=2048 , __UpperCamelCase : Dict=6 , __UpperCamelCase : Union[str, Any]=2048 , __UpperCamelCase : str=8 , __UpperCamelCase : List[Any]=6 , __UpperCamelCase : Union[str, Any]=1024 , __UpperCamelCase : Optional[int]=8 , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any="relu" , __UpperCamelCase : Dict=256 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Any=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1.0 , __UpperCamelCase : Dict=True , __UpperCamelCase : str=False , __UpperCamelCase : List[Any]="sine" , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : int=4 , __UpperCamelCase : Dict=True , __UpperCamelCase : List[Any]=300 , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : List[str]=1 , __UpperCamelCase : Optional[Any]=5 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Tuple=1 , __UpperCamelCase : int=1 , __UpperCamelCase : str=5 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Tuple=0.2_5 , **__UpperCamelCase : Union[str, Any] , ) -> Optional[int]:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCamelCase = backbone_config.pop('''model_type''' )
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__UpperCamelCase )
_UpperCamelCase = backbone_config
_UpperCamelCase = num_queries
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
# deformable attributes
_UpperCamelCase = num_feature_levels
_UpperCamelCase = encoder_n_points
_UpperCamelCase = decoder_n_points
_UpperCamelCase = two_stage
_UpperCamelCase = two_stage_num_proposals
_UpperCamelCase = with_box_refine
_UpperCamelCase = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
_UpperCamelCase = focal_alpha
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def _UpperCamelCase ( self : Optional[Any] ) -> int:
return self.encoder_attention_heads
@property
def _UpperCamelCase ( self : List[str] ) -> int:
return self.d_model
def _UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
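# The to_dict override above re-serialises the nested backbone config and
# restores model_type, so config -> dict -> config round trips keep the
# composite structure. Usage sketch (the class name is masked in this
# dump; the released class is DetaConfig):
#   cfg = DetaConfig()
#   assert cfg.to_dict()["backbone_config"]["model_type"] == "resnet"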
| 54 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCAmelCase ( metaclass=__a):
__a : Tuple = ["""torch""", """torchsde"""]
def __init__( self , *_A , **_A ) -> int:
'''simple docstring'''
requires_backends(self , ["""torch""", """torchsde"""] )
@classmethod
def __snake_case ( cls , *_A , **_A ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
@classmethod
def __snake_case ( cls , *_A , **_A ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ["""torch""", """torchsde"""] )
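# A minimal, runnable sketch of the dummy-object pattern used above: a
# placeholder that fails loudly at instantiation time when optional
# backends are missing (standalone; `requires_backends` normally performs
# this check for real).
class _DummyBackendObject:
    _backends = ["torch", "torchsde"]
    def __init__(self , *args , **kwargs ):
        raise ImportError(
            f"This object requires the optional backends {self._backends} to be installed." )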
| 246 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
lowerCamelCase__ : str = re.compile(r'''([A-Z]+)([A-Z][a-z])''')
lowerCamelCase__ : List[Any] = re.compile(r'''([a-z\d])([A-Z])''')
lowerCamelCase__ : int = re.compile(r'''(?<!_)_(?!_)''')
lowerCamelCase__ : List[str] = re.compile(r'''(_{2,})''')
lowerCamelCase__ : List[Any] = r'''^\w+(\.\w+)*$'''
lowerCamelCase__ : str = r'''<>:/\|?*'''
def UpperCamelCase ( _lowerCAmelCase : Optional[int] ) -> str:
_UpperCAmelCase : Any = _uppercase_uppercase_re.sub(R"""\1_\2""", _lowerCAmelCase )
_UpperCAmelCase : Tuple = _lowercase_uppercase_re.sub(R"""\1_\2""", _lowerCAmelCase )
return name.lower()
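# Worked example of the conversion implemented above (regexes repeated here
# so the snippet runs standalone):
_up = re.compile(r"([A-Z]+)([A-Z][a-z])")
_low = re.compile(r"([a-z\d])([A-Z])")
assert _low.sub(r"\1_\2", _up.sub(r"\1_\2", "SomeDatasetV2Name") ).lower() == "some_dataset_v2_name"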
def UpperCamelCase ( _lowerCAmelCase : Optional[Any] ) -> Tuple:
_UpperCAmelCase : Optional[Any] = _single_underscore_re.split(_lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = [_multiple_underscores_re.split(_lowerCAmelCase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_lowerCAmelCase ) if n != """""" )
def UpperCamelCase ( _lowerCAmelCase : Optional[Any] ) -> str:
if os.path.basename(_lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(_lowerCAmelCase )
def UpperCamelCase ( _lowerCAmelCase : Dict, _lowerCAmelCase : Tuple ) -> int:
if os.path.basename(_lowerCAmelCase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re, _lowerCAmelCase ):
raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(_lowerCAmelCase )}-{split}'''
def UpperCamelCase ( _lowerCAmelCase : Dict, _lowerCAmelCase : Union[str, Any], _lowerCAmelCase : Optional[Any], _lowerCAmelCase : Tuple=None ) -> List[Any]:
_UpperCAmelCase : Optional[int] = filename_prefix_for_split(_lowerCAmelCase, _lowerCAmelCase )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
_UpperCAmelCase : int = os.path.join(_lowerCAmelCase, _lowerCAmelCase )
return f'''{filepath}*'''
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : List[Any], _lowerCAmelCase : Any=None, _lowerCAmelCase : int=None ) -> str:
_UpperCAmelCase : Union[str, Any] = filename_prefix_for_split(_lowerCAmelCase, _lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = os.path.join(_lowerCAmelCase, _lowerCAmelCase )
if shard_lengths:
_UpperCAmelCase : List[str] = len(_lowerCAmelCase )
_UpperCAmelCase : List[Any] = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(_lowerCAmelCase )]
if filetype_suffix:
_UpperCAmelCase : Union[str, Any] = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
_UpperCAmelCase : Any = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
| 246 | 1 |
def lowerCamelCase_ ( _a ):
"""simple docstring"""
stooge(_a , 0 , len(_a ) - 1 )
return arr
def lowerCamelCase_ ( _a , _a , _a ):
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
        arr[i] , arr[h] = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowerCAmelCase__ : Tuple = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , (h) )
        # Recursively sort first 2/3 elements
        stooge(arr , i , (h - t) )
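# Complexity note: the three overlapping 2/3-size recursive calls give
# T(n) = 3*T(2n/3) + O(1), i.e. O(n^(log 3 / log 1.5)) ~ O(n^2.71) --
# slower than bubble sort, so stooge sort is of pedagogical interest only.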
if __name__ == "__main__":
lowerCamelCase = input('''Enter numbers separated by a comma:\n''').strip()
lowerCamelCase = [int(item) for item in user_input.split(''',''')]
print(stooge_sort(unsorted))
| 211 |
from math import isqrt
def lowerCamelCase_ ( _a ):
"""simple docstring"""
lowerCAmelCase__ : Dict = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _a , _a ):
lowerCAmelCase__ : int = False
return [i for i in range(2 , _a ) if is_prime[i]]
def lowerCamelCase_ ( _a = 10**8 ):
"""simple docstring"""
lowerCAmelCase__ : Any = calculate_prime_numbers(max_number // 2 )
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : Optional[int] = len(_a ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
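# Why the two-pointer scan above is linear in the number of primes: with
# the primes sorted and p = prime_numbers[left] fixed, every q between
# indices left and right (inclusive) satisfies p * q < max_number once
# right has been decremented past all too-large factors; and since p only
# grows, right only ever moves left, so each pointer crosses the list once.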
if __name__ == "__main__":
print(f'''{solution() = }''')
| 211 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowercase__ : Union[str, Any] = 16
lowercase__ : Dict = 32
def _lowerCAmelCase ( __snake_case : Accelerator , __snake_case : int = 16 ) -> Tuple:
__A : Dict = AutoTokenizer.from_pretrained('bert-base-cased' )
__A : Dict = load_dataset('glue' , 'mrpc' )
def tokenize_function(__snake_case : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__A : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__snake_case , max_length=__snake_case )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__A : Any = datasets.map(
__snake_case , batched=__snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__A : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__snake_case : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__A : Optional[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__A : List[str] = 16
elif accelerator.mixed_precision != "no":
__A : Dict = 8
else:
__A : Tuple = None
return tokenizer.pad(
__snake_case , padding='longest' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='pt' , )
# Instantiate dataloaders.
__A : Tuple = DataLoader(
tokenized_datasets['train'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
__A : Union[str, Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowercase__ : Union[str, Any] = mocked_dataloaders # noqa: F811
def _lowerCAmelCase ( __snake_case : Any , __snake_case : Optional[Any] ) -> int:
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , __snake_case ) == "1":
__A : int = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__A : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
__A : Optional[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__A : Union[str, Any] = config['lr']
__A : Any = int(config['num_epochs'] )
__A : Dict = int(config['seed'] )
__A : Union[str, Any] = int(config['batch_size'] )
set_seed(__snake_case )
__A ,__A : List[str] = get_dataloaders(__snake_case , __snake_case )
__A : Optional[Any] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__A : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__A : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
__A : Union[str, Any] = MAX_GPU_BATCH_SIZE
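    # e.g. with batch_size=64 and MAX_GPU_BATCH_SIZE=16 this yields
    # gradient_accumulation_steps=4 with a per-step batch of 16, so the
    # effective batch size seen by the optimizer stays at 64.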
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__A : str = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__snake_case )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__A : int = model.to(accelerator.device )
# Instantiate optimizer
__A : str = AdamW(params=model.parameters() , lr=__snake_case )
# Instantiate scheduler
__A : Any = get_linear_schedule_with_warmup(
optimizer=__snake_case , num_warmup_steps=1_00 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__A ,__A ,__A ,__A ,__A : Any = accelerator.prepare(
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__A : int = os.path.split(__snake_case )[-1].split('.' )[0]
accelerator.init_trackers(__snake_case , __snake_case )
# Now we train the model
for epoch in range(__snake_case ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__A : int = 0
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__A : List[Any] = model(**__snake_case )
__A : List[str] = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__A : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__snake_case )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__snake_case ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__A : int = model(**__snake_case )
__A : Any = outputs.logits.argmax(dim=-1 )
__A ,__A : List[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__snake_case , references=__snake_case , )
__A : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , __snake_case )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
'accuracy': eval_metric['accuracy'],
'f1': eval_metric['f1'],
'train_loss': total_loss.item() / len(__snake_case ),
'epoch': epoch,
} , step=__snake_case , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _lowerCAmelCase ( ) -> Tuple:
__A : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__snake_case , default=__snake_case , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=__snake_case , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
__A : Tuple = parser.parse_args()
__A : Union[str, Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(__snake_case , __snake_case )
if __name__ == "__main__":
    main()
| 190 |
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
@staticmethod
def SCREAMING_SNAKE_CASE ( *_UpperCAmelCase , **_UpperCAmelCase):
'''simple docstring'''
pass
def _lowerCAmelCase ( __snake_case : Image ) -> str:
    __A : Dict = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def _lowerCAmelCase ( __snake_case : Image ) -> Dict:
__A : Dict = np.array(__snake_case )
__A : List[Any] = npimg.shape
return {"hash": hashimage(__snake_case ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Any = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase)
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge')
__A : Tuple = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = 'facebook/sam-vit-huge'
__A : Optional[Any] = pipeline('mask-generation' , model=_UpperCAmelCase)
__A : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256)
# Shortening by hashing
__A : int = []
for i, o in enumerate(outputs['masks']):
new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
        ] , )
| 190 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class snake_case_ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any , _snake_case : int , _snake_case : int )->str:
'''simple docstring'''
__lowerCAmelCase : Any = jnp.ones((batch_size, length) ) / length
return scores
def UpperCAmelCase__ ( self : List[Any] )->List[str]:
'''simple docstring'''
__lowerCAmelCase : Any = None
__lowerCAmelCase : Optional[int] = 20
__lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(batch_size=2 , length=_lowerCamelCase )
# tweak scores to not be uniform anymore
__lowerCAmelCase : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCAmelCase : Tuple = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCAmelCase : Any = jax.nn.softmax(_lowerCamelCase , axis=-1 )
__lowerCAmelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : Any = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCAmelCase : Dict = jax.nn.softmax(temp_dist_warper_sharper(_lowerCamelCase , scores.copy() , cur_len=_lowerCamelCase ) , axis=-1 )
__lowerCAmelCase : Any = jax.nn.softmax(temp_dist_warper_smoother(_lowerCamelCase , scores.copy() , cur_len=_lowerCamelCase ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : List[str] = 10
__lowerCAmelCase : int = 2
# create ramp distribution
__lowerCAmelCase : int = np.broadcast_to(np.arange(_lowerCamelCase )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCAmelCase : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCAmelCase : Optional[Any] = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : Any = top_k_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCAmelCase : List[str] = 5
__lowerCAmelCase : str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCAmelCase : Optional[int] = np.broadcast_to(np.arange(_lowerCamelCase )[None, :] , (batch_size, length) ).copy()
__lowerCAmelCase : Dict = top_k_warp_safety_check(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase__ ( self : List[Any] )->Tuple:
'''simple docstring'''
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Dict = 10
__lowerCAmelCase : List[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCAmelCase : List[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCAmelCase : Optional[Any] = FlaxTopPLogitsWarper(0.8 )
__lowerCAmelCase : List[Any] = np.exp(top_p_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCAmelCase : List[str] = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# check edge cases with negative and extreme logits
__lowerCAmelCase : Optional[int] = np.broadcast_to(np.arange(_lowerCamelCase )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCAmelCase : Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
__lowerCAmelCase : Tuple = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCAmelCase : List[str] = top_p_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
__lowerCAmelCase : Any = 20
__lowerCAmelCase : List[Any] = 4
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCamelCase )
# check that min length is applied at length 5
__lowerCAmelCase : List[Any] = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCAmelCase : List[Any] = 5
__lowerCAmelCase : Union[str, Any] = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : Tuple = min_dist_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("""inf""" )] )
# check that min length is not applied anymore at length 15
__lowerCAmelCase : str = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : int = 15
__lowerCAmelCase : Tuple = min_dist_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertFalse(jnp.isinf(_lowerCamelCase ).any() )
def UpperCAmelCase__ ( self : List[Any] )->Dict:
'''simple docstring'''
__lowerCAmelCase : Any = 20
__lowerCAmelCase : str = 4
__lowerCAmelCase : str = 0
__lowerCAmelCase : Any = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCamelCase )
# check that all scores are -inf except the bos_token_id score
__lowerCAmelCase : Tuple = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : str = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = logits_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCAmelCase : Any = 3
__lowerCAmelCase : Any = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = logits_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertFalse(jnp.isinf(_lowerCamelCase ).any() )
def UpperCAmelCase__ ( self : str )->Tuple:
'''simple docstring'''
__lowerCAmelCase : int = 20
__lowerCAmelCase : int = 4
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : List[str] = 5
__lowerCAmelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCamelCase , eos_token_id=_lowerCamelCase )
# check that all scores are -inf except the eos_token_id when max_length is reached
__lowerCAmelCase : List[str] = ids_tensor((batch_size, 4) , vocab_size=20 )
__lowerCAmelCase : List[str] = 4
__lowerCAmelCase : int = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : Optional[Any] = logits_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
__lowerCAmelCase : Optional[Any] = 3
__lowerCAmelCase : str = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : str = logits_processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
self.assertFalse(jnp.isinf(_lowerCamelCase ).any() )
def UpperCAmelCase__ ( self : List[str] )->Dict:
'''simple docstring'''
__lowerCAmelCase : int = 4
__lowerCAmelCase : List[Any] = 10
__lowerCAmelCase : Union[str, Any] = 15
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Dict = 15
# dummy input_ids and scores
__lowerCAmelCase : Any = ids_tensor((batch_size, sequence_length) , _lowerCamelCase )
__lowerCAmelCase : Optional[int] = input_ids.copy()
__lowerCAmelCase : int = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : List[str] = scores.copy()
# instantiate all dist processors
__lowerCAmelCase : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : Any = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : Any = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase : Union[str, Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCamelCase )
__lowerCAmelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCamelCase )
__lowerCAmelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCamelCase , eos_token_id=_lowerCamelCase )
__lowerCAmelCase : Optional[int] = 10
# no processor list
__lowerCAmelCase : str = temp_dist_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = top_k_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = top_p_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Dict = min_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : List[str] = bos_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = eos_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
# with processor list
__lowerCAmelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase__ ( self : int )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = 4
__lowerCAmelCase : Union[str, Any] = 10
__lowerCAmelCase : Any = 15
__lowerCAmelCase : List[Any] = 2
__lowerCAmelCase : int = 1
__lowerCAmelCase : Optional[Any] = 15
# dummy input_ids and scores
__lowerCAmelCase : Dict = ids_tensor((batch_size, sequence_length) , _lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = input_ids.copy()
__lowerCAmelCase : Optional[int] = self._get_uniform_logits(_lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : str = scores.copy()
# instantiate all dist processors
__lowerCAmelCase : Tuple = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCAmelCase : List[Any] = FlaxTopKLogitsWarper(3 )
__lowerCAmelCase : Dict = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
__lowerCAmelCase : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=_lowerCamelCase )
__lowerCAmelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=_lowerCamelCase )
__lowerCAmelCase : Tuple = FlaxForcedEOSTokenLogitsProcessor(max_length=_lowerCamelCase , eos_token_id=_lowerCamelCase )
__lowerCAmelCase : str = 10
# no processor list
def run_no_processor_list(_snake_case : List[Any] , _snake_case : Tuple , _snake_case : Optional[int] ):
__lowerCAmelCase : Any = temp_dist_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Optional[int] = top_k_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Union[str, Any] = top_p_warp(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Optional[int] = min_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Dict = bos_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
__lowerCAmelCase : Optional[int] = eos_dist_proc(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
return scores
# with processor list
def run_processor_list(_snake_case : List[Any] , _snake_case : Any , _snake_case : str ):
__lowerCAmelCase : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
__lowerCAmelCase : int = processor(_lowerCamelCase , _lowerCamelCase , cur_len=_lowerCamelCase )
return scores
__lowerCAmelCase : Dict = jax.jit(_lowerCamelCase )
__lowerCAmelCase : Dict = jax.jit(_lowerCamelCase )
__lowerCAmelCase : int = jitted_run_no_processor_list(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCAmelCase : List[Any] = jitted_run_processor_list(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# scores should be equal
self.assertTrue(jnp.allclose(_lowerCamelCase , _lowerCamelCase , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
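# Standalone illustration of the temperature-warping behaviour tested
# above: dividing logits by T < 1 sharpens the softmax, T > 1 flattens it
# (pure numpy; no transformers dependency).
def _softmax(x ):
    e = np.exp(x - x.max() )
    return e / e.sum()
_logits = np.array([2.0, 1.0, 0.0] )
assert _softmax(_logits / 0.5 ).max() > _softmax(_logits ).max() > _softmax(_logits / 1.3 ).max()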
| 368 |
from datetime import datetime
import requests
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str ) -> bytes:
__lowerCAmelCase : List[Any] = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
__lowerCAmelCase : Dict = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
return requests.get(SCREAMING_SNAKE_CASE ).content
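# Note: this depends on a third-party endpoint (downloadgram.net) whose
# JSON shape -- response[0]["urls"][0]["src"] -- is outside our control
# and may change or disappear; treat the script as illustrative only.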
if __name__ == "__main__":
_UpperCAmelCase = input('Enter Video/IGTV url: ').strip()
_UpperCAmelCase = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 232 | 0 |
'''simple docstring'''
from __future__ import annotations
lowerCamelCase :Optional[int] = list[list[int]]
# assigning initial values to the grid
lowerCamelCase :Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
lowerCamelCase :Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def a ( lowerCamelCase__ ):
'''simple docstring'''
if location := find_empty_location(lowerCamelCase__ ):
A_, A_ : Union[str, Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
A_ : int = digit
if sudoku(lowerCamelCase__ ) is not None:
return grid
A_ : Dict = 0
return None
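# Backtracking summary: find_empty_location picks the next blank cell,
# digits 1-9 are tried subject to the row/column/3x3-box check in is_safe,
# and a failed branch resets the cell to 0 before returning None. Worst
# case is exponential, but the pruning keeps typical puzzles fast.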
def a ( lowerCamelCase__ ):
'''simple docstring'''
for row in grid:
for cell in row:
print(lowerCamelCase__ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 2_0)
print_solution(example_grid)
print('''\nExample grid solution:''')
lowerCamelCase :Optional[int] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
        print('''Cannot find a solution.''')
| 206 |
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase :str = TypeVar('''T''')
class _lowerCAmelCase ( Generic[T] ):
def __init__(self , lowercase = True ):
A_ : dict[T, list[T]] = {} # dictionary of lists
A_ : Any = directed
def _a (self , lowercase , lowercase ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
self.adj_list[destination_vertex].append(lowercase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
A_ : Dict = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(lowercase )
A_ : int = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
A_ : Optional[Any] = [destination_vertex]
A_ : Tuple = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(lowercase )
A_ : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
A_ : Tuple = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
A_ : int = [destination_vertex]
A_ : List[str] = []
return self
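    # Usage sketch (the class and method names are masked in this dump;
    # hypothetical readable names shown):
    #   g = GraphAdjacencyList(directed=False)
    #   g.add_edge(1, 2).add_edge(2, 3)   # returns self, so calls chain
    #   print(g)  # {1: [2], 2: [1, 3], 3: [2]}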
def __repr__(self ):
        return pformat(self.adj_list )
| 206 | 1 |
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12_312
sock.connect((host, port) )
sock.send(B"""Hello server!""" )
with open("""Received_file""" , """wb""" ) as out_file:
print("""File opened""" )
print("""Receiving data...""" )
while True:
            data = sock.recv(1_024 )
if not data:
break
            out_file.write(data )
print("""Successfully received the file""" )
sock.close()
print("""Connection closed""" )
if __name__ == "__main__":
main()
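# A minimal counterpart server sketch (added for illustration; not part of the
# original file). The client above expects a peer that accepts the connection,
# reads the greeting, and streams a file back; the filename here is a
# placeholder assumption.
def demo_server(filename: str = "file_to_send.bin") -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12_312))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1_024))  # the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()
    server.close()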
| 285 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition( table: np.ndarray ) -> tuple[np.ndarray, np.ndarray]:
    rows , columns = np.shape(table )
if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
if upper[j][j] == 0:
raise ArithmeticError("""No LU decomposition exists""" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(i ) )
            upper[i][j] = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
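    # Worked example (added for illustration; not part of the original file):
    # decompose a small non-singular matrix and verify lower @ upper == table.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_mat, upper_mat = lower_upper_decomposition(example)
    assert np.allclose(lower_mat @ upper_mat, example)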
| 285 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests (PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyInpaintPipeline
    params = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
    required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ) -> int:
        return 32
    @property
    def time_input_dim( self ) -> int:
        return 32
    @property
    def block_out_channels_a( self ) -> int:
        return self.time_input_dim
    @property
    def time_embed_dim( self ) -> int:
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ) -> int:
        return 100
    @property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer
    @property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNetaDConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="epsilon" , thresholding=False , )
        components = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uinta(image ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.floataa )
        mask[:32, :32] = 0
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
        return inputs
    def test_kandinsky_inpaint( self ):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def test_inference_batch_single_identical( self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests (unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_inpaint( self ):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        mask = np.ones((768, 768) , dtype=np.floataa )
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , mask_image=mask , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
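# Hedged usage sketch (added for illustration; not part of the original tests):
# the same two-stage Kandinsky flow outside a test harness, with model ids and
# arguments mirroring the integration test above.
#
#     prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#     inpaint = KandinskyInpaintPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-inpaint")
#     emb, zero_emb = prior("a hat").to_tuple()
#     image = inpaint("a hat", image=init_image, mask_image=mask, image_embeds=emb,
#                     negative_image_embeds=zero_emb, output_type="np").images[0]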
| 268 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester :
'''simple docstring'''
    def __init__( self , parent , d_model=16 , batch_size=13 , prediction_length=7 , context_length=14 , label_length=10 , cardinality=19 , embedding_dimension=5 , num_time_features=4 , is_training=True , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , lags_sequence=[1, 2, 3, 4, 5] , moving_average=25 , autocorrelation_factor=5 , ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config( self ):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict( self , config ):
        _past_length = config.context_length + max(config.lags_sequence )
        static_categorical_features = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        past_values = floats_tensor([self.batch_size, _past_length] )
        past_observed_mask = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        future_values = floats_tensor([self.batch_size, config.prediction_length] )
        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs( self ):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = AutoformerModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs , feature , _ , _ , _ = model.create_network_inputs(**inputs_dict )
        seasonal_input , trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class AutoformerModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ):
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config , _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model_a , info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["""missing_keys"""] , [] )
    def test_encoder_decoder_model_standalone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def test_resize_tokens_embeddings( self ):
        pass
    def test_model_main_input_name( self ):
        model_signature = inspect.signature(getattr(AutoformerModel , """forward""" ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester , """seq_length""" , None )
        decoder_seq_length = getattr(self.model_tester , """decoder_seq_length""" , seq_length )
        encoder_seq_length = getattr(self.model_tester , """encoder_seq_length""" , seq_length )
        d_model = getattr(self.model_tester , """d_model""" , None )
        num_attention_heads = getattr(self.model_tester , """num_attention_heads""" , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = False
            config.return_dict = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
# decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
            inputs_dict["""output_attentions"""] = True
            inputs_dict["""output_hidden_states"""] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions( self ):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt" ):
    file = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=filename , repo_type="""dataset""" )
    batch = torch.load(file , map_location=torch_device )
return batch
@require_torch
@slow
class AutoformerModelIntegrationTests ( unittest.TestCase ):
'''simple docstring'''
    def test_inference_no_head( self ):
        model = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            output = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        model = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(torch_device )
        batch = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1e-1 ) )
| 301 | 0 |
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function( z ):
    """simple docstring"""
    return 1 / (1 + np.exp(-z ))
def cost_function( h , y ):
    """simple docstring"""
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
def log_likelihood( x , y , weights ):
    """simple docstring"""
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg( alpha , x , y , max_iterations=70_000 ):
    """simple docstring"""
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 100 == 0:
            print(f'''loss: {j} \t''' )  # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
print("""theta: """, theta) # printing the theta i.e our weights vector
    def predict_prob( x ):
        """simple docstring"""
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
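    # Follow-up check (added for illustration; not part of the original script):
    # classify the training points with the fitted weights and report accuracy.
    predictions = predict_prob(x) >= 0.5
    print("""training accuracy: """, (predictions == y).mean())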
| 357 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    """simple docstring"""
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = "\n".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f''': {x}: ''' in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' )
    return selected_warnings
def extract_warnings( output_dir , targets ):
    """simple docstring"""
    selected_warnings = set()
    paths = [os.path.join(output_dir , p ) for p in os.listdir(output_dir ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        """simple docstring"""
        return values.split("," )
UpperCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
UpperCAmelCase : List[Any] = parser.parse_args()
UpperCAmelCase : str = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
UpperCAmelCase : Dict = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("""=""" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
UpperCAmelCase : Tuple = extract_warnings(args.output_dir, args.targets)
UpperCAmelCase : Optional[Any] = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
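    # Hypothetical invocation sketch (added for illustration; the run id, token,
    # and script name are placeholders):
    #
    #     python extract_warnings.py --workflow_run_id 1234567890 \
    #         --output_dir ./artifacts --token $GH_TOKEN \
    #         --targets DeprecationWarning,UserWarning
    #
    # `selected_warnings.json` then holds the sorted, deduplicated warnings.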
| 148 | 0 |
'''simple docstring'''
def move_tower(height, from_pole, to_pole, with_pole ) -> None:
    '''simple docstring'''
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole )
        move_disk(from_pole, to_pole )
        move_tower(height - 1, with_pole, to_pole, from_pole )
def move_disk(from_pole, to_pole ) -> None:
    '''simple docstring'''
    print('''moving disk from''', from_pole, '''to''', to_pole )
def main() -> None:
    '''simple docstring'''
    height = int(input('''Height of hanoi: ''' ).strip() )
    move_tower(height, '''A''', '''B''', '''C''' )
if __name__ == "__main__":
main()
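    # Example session (derived from the argument order fixed above): a height
    # of 2 prints
    #   moving disk from A to C
    #   moving disk from A to B
    #   moving disk from C to B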
| 56 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class FlaxUNetaDConditionOutput ( BaseOutput ):
    sample: jnp.ndarray
@flax_register_to_config
class FlaxUNetaDConditionModel ( nn.Module , FlaxModelMixin , ConfigMixin ):
    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: tuple = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: tuple = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: bool = False
    block_out_channels: tuple = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: int = 8
    num_attention_heads: Optional[int] = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.floataa
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False
    def init_weights( self , rng: jax.random.KeyArray ) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.floataa )
        timesteps = jnp.ones((1,) , dtype=jnp.intaa )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        params_rng , dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states )["params"]
    def setup( self ):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
        self.down_blocks = down_blocks
        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]
            is_final_block = i == len(block_out_channels ) - 1
            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks
        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict: bool = True , train: bool = False , ):
# 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.floataa )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample , res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample , res_samples = down_block(sample , t_emb , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()
            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)
            down_block_res_samples = new_down_block_res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual
        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )
        # 6. post-process
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )
        if not return_dict:
            return (sample,)
        return FlaxUNetaDConditionOutput(sample=sample )
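# Hedged initialization sketch (added for illustration; not part of the
# original file). Shapes follow the class defaults above:
#
#     unet = FlaxUNetaDConditionModel(sample_size=32)
#     params = unet.init_weights(jax.random.PRNGKey(0))
#     out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)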
| 56 | 1 |
'''simple docstring'''
def matching_min_vertex_cover(graph ):
    """simple docstring"""
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add both of its endpoints to chosen_vertices, and
    # then remove all arcs adjacent to from_node and to_node.
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges(graph ):
    """simple docstring"""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 364 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class TatoebaConversionTester ( unittest.TestCase ):
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
    def test_resolver( self ):
self.resolver.convert_models(["""heb-eng"""] )
@slow
    def test_model_card( self ):
        model_card , mmeta = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 220 | 0 |
'''simple docstring'''
def dodecahedron_surface_area(edge):
    """simple docstring"""
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def dodecahedron_volume(edge):
    """simple docstring"""
    if not isinstance(edge , (int, float)) or edge <= 0:
        raise ValueError('''Length must be a positive.''')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
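    # Quick demonstration (added for illustration; not part of the original
    # file): both measures for a unit edge.
    print(dodecahedron_surface_area(1))
    print(dodecahedron_volume(1))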
| 211 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree ( Generic[T] ):
    def __init__( self , arr: list[T] , fnc: Callable[[T, T], T] ):
        any_type: Any = None
        self.N: int = len(arr )
        self.st: list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
self.build()
    def build( self ):
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def update( self , p: int , v: T ):
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
    def query( self , l: int , r: int ):  # noqa: E741
        l , r = l + self.N, r + self.N
        res = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l , r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        '''simple docstring'''
        for i in range(len(test_array ) ):
            for j in range(i , len(test_array ) ):
                min_range = reduce(min , test_array[i : j + 1] )
                max_range = reduce(max , test_array[i : j + 1] )
                sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
                assert min_range == min_segment_tree.query(i , j )
                assert max_range == max_segment_tree.query(i , j )
                assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
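    # Follow-up demo (added for illustration; not part of the original file):
    # whole-array queries after the updates above.
    print(min_segment_tree.query(0, len(test_array) - 1))
    print(max_segment_tree.query(0, len(test_array) - 1))
    print(sum_segment_tree.query(0, len(test_array) - 1))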
| 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavaveca"] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wavaveca"] = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wavaveca"] = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
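# Usage note (added for illustration): with the lazy module installed in
# sys.modules, consumers import the public names directly, e.g.
#
#     from transformers import Wav2Vec2Model, Wav2Vec2Processor
#
# and the heavy submodules above are only imported on first attribute access.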
| 2 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-sat-base-100h-libri-ft": (
"https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig (PretrainedConfig ):
    """simple docstring"""
    model_type = "unispeech-sat"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , num_clusters=504 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def _snake_case ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
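# Quick illustration (added; not part of the original file): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), inputs_to_logits_ratio is
# 5 * 2**6 = 320, i.e. one output frame per 320 input samples.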
| 2 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest ( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.checkpoint ='''ylacombe/bark-small'''
        self.tmpdirname =tempfile.mkdtemp()
        self.voice_preset ='''en_speaker_1'''
        self.input_string ='''This is a test string'''
        self.speaker_embeddings_dict_path ='''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory ='''speaker_embeddings'''
    def get_tokenizer( self, **kwargs ):
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ):
        """simple docstring"""
        tokenizer =self.get_tokenizer()
        processor =BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor =BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
    @slow
    def test_save_load_pretrained_additional_features( self ):
        """simple docstring"""
        processor =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
        tokenizer_add_kwargs =self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''' )
        processor =BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token='''(BOS)''', eos_token='''(EOS)''', )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ):
        """simple docstring"""
        processor =BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len =35
        nb_codebooks_coarse =2
        nb_codebooks_total =8
        voice_preset ={
            '''semantic_prompt''': np.ones(seq_len ),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
        }
# test providing already loaded voice_preset
lowerCamelCase_ =processor(text=self.input_string, voice_preset=lowerCAmelCase )
lowerCamelCase_ =inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(lowerCAmelCase, np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowerCamelCase_ =os.path.join(self.tmpdirname, '''file.npz''' )
np.savez(lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =processor(text=self.input_string, voice_preset=lowerCAmelCase )
lowerCamelCase_ =inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(lowerCAmelCase, np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowerCamelCase_ =processor(text=self.input_string, voice_preset=self.voice_preset )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =self.get_tokenizer()
lowerCamelCase_ =BarkProcessor(tokenizer=lowerCAmelCase )
lowerCamelCase_ =processor(text=self.input_string )
lowerCamelCase_ =tokenizer(
self.input_string, padding='''max_length''', max_length=256, add_special_tokens=lowerCAmelCase, return_attention_mask=lowerCAmelCase, return_token_type_ids=lowerCAmelCase, )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist() )
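# A minimal, offline sketch of the voice-preset structure the tests above feed
# to BarkProcessor, including the .npz round-trip; the shapes mirror the test
# constants and the file name is an arbitrary example.
import os
import tempfile

import numpy as np

seq_len, nb_codebooks_coarse, nb_codebooks_total = 35, 2, 8
voice_preset = {
    "semantic_prompt": np.ones(seq_len),
    "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
    "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "file.npz")
    np.savez(path, **voice_preset)
    with np.load(path) as loaded:
        assert all(np.array_equal(voice_preset[k], loaded[k]) for k in voice_preset)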
| 75 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A :
@staticmethod
def lowercase__ ( *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any ):
pass
def hashimage(image) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
lowerCAmelCase_ : Dict = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
lowerCAmelCase_ : Any = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : List[str] = MaskGenerationPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict ):
pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def lowercase__ ( self : Dict ):
pass
@slow
@require_torch
def lowercase__ ( self : str ):
lowerCAmelCase : Optional[int] = pipeline('mask-generation' , model='facebook/sam-vit-huge' )
lowerCAmelCase : Union[str, Any] = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Union[str, Any] = 'facebook/sam-vit-huge'
lowerCAmelCase : str = pipeline('mask-generation' , model=UpperCAmelCase_ )
lowerCAmelCase : int = image_segmenter(
'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
new_output = []
for i, o in enumerate(outputs['masks'] ):
new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] , )
| 138 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class SCREAMING_SNAKE_CASE (a__ , unittest.TestCase ):
lowerCAmelCase = MvpTokenizer
lowerCAmelCase = MvpTokenizerFast
lowerCAmelCase = True
lowerCAmelCase = filter_roberta_detectors
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setUp()
__A : List[Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__A : List[str] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase))))
__A : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__A : int = {'unk_token': '<unk>'}
__A : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
__A : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(_UpperCAmelCase) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(_UpperCAmelCase))
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return "lower newer", "lower newer"
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return MvpTokenizer.from_pretrained('RUCAIBox/mvp')
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp')
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__A : Any = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__A : Any = tokenizer(_UpperCAmelCase , max_length=len(_UpperCAmelCase) , padding=_UpperCAmelCase , return_tensors='pt')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
__A : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
# Test that special tokens are reset
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__A : Tuple = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt')
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _UpperCAmelCase)
self.assertIn('attention_mask' , _UpperCAmelCase)
self.assertNotIn('labels' , _UpperCAmelCase)
self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase)
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__A : Union[str, Any] = tokenizer(text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , return_tensors='pt')
self.assertEqual(32 , targets['input_ids'].shape[1])
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__A : List[Any] = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='pt')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = ['A long paragraph for summarization.']
__A : Any = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__A : List[str] = tokenizer(_UpperCAmelCase , text_target=_UpperCAmelCase , return_tensors='pt')
__A : Union[str, Any] = inputs['input_ids']
__A : int = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
__A : Tuple = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__A : Optional[Any] = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__A : int = 'A, <mask> AllenNLP sentence.'
__A : Optional[int] = tokenizer_r.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase)
__A : Any = tokenizer_p.encode_plus(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase)
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
__A : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
__A : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
self.assertSequenceEqual(
_UpperCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 190 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
lowercase__ : str = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''roberta-prelayernorm'''
def __init__( self , _UpperCAmelCase=5_0265 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , **_UpperCAmelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase)
__A : Union[str, Any] = vocab_size
__A : List[str] = hidden_size
__A : Tuple = num_hidden_layers
__A : List[str] = num_attention_heads
__A : Tuple = hidden_act
__A : Optional[Any] = intermediate_size
__A : List[Any] = hidden_dropout_prob
__A : Union[str, Any] = attention_probs_dropout_prob
__A : Dict = max_position_embeddings
__A : Any = type_vocab_size
__A : Optional[int] = initializer_range
__A : List[str] = layer_norm_eps
__A : Optional[int] = position_embedding_type
__A : Dict = use_cache
__A : List[str] = classifier_dropout
class SCREAMING_SNAKE_CASE (a__ ):
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if self.task == "multiple-choice":
__A : List[str] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__A : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
])
| 190 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
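# A tiny sanity check of the helper above (torch is already imported in this
# file): a Linear(4, 2) layer has 4 * 2 weights plus 2 biases = 10 parameters.
_tiny = torch.nn.Linear(4, 2)
assert count_trainable_parameters(_tiny) == 10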
_a = logging.getLogger(__name__)
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
"""simple docstring"""
if metric == "rouge2":
__lowerCAmelCase: int = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__lowerCAmelCase: Union[str, Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__lowerCAmelCase: str = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
__lowerCAmelCase: Tuple = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
' function.' )
__lowerCAmelCase: Dict = ModelCheckpoint(
dirpath=SCREAMING_SNAKE_CASE , filename=SCREAMING_SNAKE_CASE , monitor=f'''val_{metric}''' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _a ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return EarlyStopping(
monitor=f'''val_{metric}''' , mode='min' if 'loss' in metric else 'max' , patience=SCREAMING_SNAKE_CASE , verbose=SCREAMING_SNAKE_CASE , )
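# For reference, the ModelCheckpoint filename templates chosen a few lines
# above are plain format strings; e.g. for the "rouge2" metric (metric values
# below are made up):
_template = '{val_avg_rouge2:.4f}-{step_count}'
assert _template.format(val_avg_rouge2=0.21434, step_count=120) == '0.2143-120'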
class A_ ( pl.Callback ):
def UpperCAmelCase ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Any ) -> List[Any]:
__lowerCAmelCase: Dict = {F'''lr_group_{i}''': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(UpperCAmelCase )
@rank_zero_only
def UpperCAmelCase ( self : Optional[int] , UpperCAmelCase : pl.Trainer , UpperCAmelCase : pl.LightningModule , UpperCAmelCase : str , UpperCAmelCase : Tuple=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
__lowerCAmelCase: Optional[int] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__lowerCAmelCase: Any = Path(pl_module.hparams.output_dir )
if type_path == "test":
__lowerCAmelCase: List[str] = od / 'test_results.txt'
__lowerCAmelCase: List[str] = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__lowerCAmelCase: Tuple = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
__lowerCAmelCase: Tuple = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=UpperCAmelCase )
generations_file.parent.mkdir(exist_ok=UpperCAmelCase )
with open(UpperCAmelCase , 'a+' ) as writer:
for key in sorted(UpperCAmelCase ):
if key in ["log", "progress_bar", "preds"]:
continue
__lowerCAmelCase: int = metrics[key]
if isinstance(UpperCAmelCase , torch.Tensor ):
__lowerCAmelCase: int = val.item()
__lowerCAmelCase: List[Any] = F'''{key}: {val:.6f}\n'''
writer.write(UpperCAmelCase )
if not save_generations:
return
if "preds" in metrics:
__lowerCAmelCase: str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(UpperCAmelCase )
@rank_zero_only
def UpperCAmelCase ( self : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ) -> Any:
try:
__lowerCAmelCase: Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
__lowerCAmelCase: Optional[int] = pl_module.model.num_parameters()
__lowerCAmelCase: str = count_trainable_parameters(UpperCAmelCase )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase : pl.Trainer , UpperCAmelCase : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(UpperCAmelCase , UpperCAmelCase , 'test' )
@rank_zero_only
def UpperCAmelCase ( self : str , UpperCAmelCase : pl.Trainer , UpperCAmelCase : List[Any] ) -> List[str]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 322 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
_a = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_2_8,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 5_0,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 1_0,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 1_0,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class A_ ( unittest.TestCase ):
@classmethod
def UpperCAmelCase ( cls : Dict ) -> List[str]:
__lowerCAmelCase: str = TOKEN
HfFolder.save_token(UpperCAmelCase )
@classmethod
def UpperCAmelCase ( cls : str ) -> List[Any]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[int]:
__lowerCAmelCase: Any = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('test-config' , use_auth_token=self._token )
__lowerCAmelCase: str = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(UpperCAmelCase , repo_id='test-config' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : int ) -> Dict:
__lowerCAmelCase: int = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
__lowerCAmelCase: Dict = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
UpperCAmelCase , repo_id='valid_org/test-config-org' , push_to_hub=UpperCAmelCase , use_auth_token=self._token )
__lowerCAmelCase: int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(UpperCAmelCase , getattr(UpperCAmelCase , UpperCAmelCase ) )
def UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
CustomConfig.register_for_auto_class()
__lowerCAmelCase: Any = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
__lowerCAmelCase: int = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=UpperCAmelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 4_2 )
class A_ ( unittest.TestCase ):
def UpperCAmelCase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase: List[Any] = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
__lowerCAmelCase: Union[str, Any] = c.n_embd + 1 # int
__lowerCAmelCase: str = c.resid_pdrop + 1.0 # float
__lowerCAmelCase: List[Any] = not c.scale_attn_weights # bool
__lowerCAmelCase: List[str] = c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(UpperCAmelCase , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(UpperCAmelCase , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(UpperCAmelCase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(UpperCAmelCase , c.summary_type , 'mismatch for key: summary_type' )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
__lowerCAmelCase: str = PretrainedConfig()
__lowerCAmelCase: Optional[int] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to add in config_common_kwargs above.
self.assertListEqual(
UpperCAmelCase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
__lowerCAmelCase: int = [key for key, value in config_common_kwargs.items() if value == getattr(UpperCAmelCase , UpperCAmelCase )]
if len(UpperCAmelCase ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(UpperCAmelCase )}.''' )
def UpperCAmelCase ( self : int ) -> Optional[Any]:
with self.assertRaises(UpperCAmelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCAmelCase: List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
__lowerCAmelCase: List[str] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCAmelCase: Union[str, Any] = mock.Mock()
__lowerCAmelCase: str = 5_0_0
__lowerCAmelCase: Optional[Any] = {}
__lowerCAmelCase: Optional[int] = HTTPError
__lowerCAmelCase: List[Any] = {}
# Download this model to make sure it's in the cache.
__lowerCAmelCase: Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase ) as mock_head:
__lowerCAmelCase: Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This checks that we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase ( self : Any ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCAmelCase: Tuple = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCAmelCase ( self : Dict ) -> str:
__lowerCAmelCase: Optional[Any] = AutoConfig.from_pretrained('bert-base-cased' )
__lowerCAmelCase: Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(UpperCAmelCase )
__lowerCAmelCase: Tuple = 2
json.dump(configuration.to_dict() , open(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCAmelCase: Dict = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCAmelCase: Dict = ['config.42.0.0.json']
__lowerCAmelCase: Optional[int] = 7_6_8
configuration.save_pretrained(UpperCAmelCase )
shutil.move(os.path.join(UpperCAmelCase , 'config.4.0.0.json' ) , os.path.join(UpperCAmelCase , 'config.42.0.0.json' ) )
__lowerCAmelCase: int = AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 7_6_8 )
def UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCAmelCase: Tuple = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
__lowerCAmelCase: List[Any] = 'v4.0.0'
__lowerCAmelCase , __lowerCAmelCase: Any = new_transformers.models.auto.AutoConfig.from_pretrained(
UpperCAmelCase , return_unused_kwargs=UpperCAmelCase )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(UpperCAmelCase , {} )
# Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
__lowerCAmelCase: List[Any] = 'v3.0.0'
__lowerCAmelCase: Union[str, Any] = old_transformers.models.auto.AutoConfig.from_pretrained(UpperCAmelCase )
self.assertEqual(old_configuration.hidden_size , 7_6_8 )
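# A standalone illustration of update_from_string, exercised by the first test
# in the class above: it parses "key=value" pairs and coerces each value to the
# type of the existing attribute. Values here are arbitrary; GPTaConfig is this
# dump's name for the GPT-2 config.
_c = GPTaConfig()
_c.update_from_string('n_embd=100,scale_attn_weights=False')
assert _c.n_embd == 100 and _c.scale_attn_weights is False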
| 322 | 1 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class a__ ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Optional[Any]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__a = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sgugger/tiny-distilbert-classification'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , only_pretrain_model=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
__a = 'sshleifer/tiny-gpt2'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , [config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
__a = 'patrickvonplaten/t5-tiny-random'
__a = AutoConfig.from_pretrained(UpperCAmelCase )
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
__a = 'sshleifer/tiny-gpt2'
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , save_to_csv=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(UpperCAmelCase , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(UpperCAmelCase , 'env.csv' ) , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'env.csv' ) ).exists() )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
__a = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(UpperCAmelCase ):
self.assertTrue(hasattr(UpperCAmelCase , 'sequential' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'cumulative' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'current' ) )
self.assertTrue(hasattr(UpperCAmelCase , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase , 'log.txt' ) , log_print=UpperCAmelCase , trace_memory_line_by_line=UpperCAmelCase , eager_mode=UpperCAmelCase , multi_process=UpperCAmelCase , )
__a = TensorFlowBenchmark(UpperCAmelCase )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(UpperCAmelCase , 'log.txt' ) ).exists() )
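# The pattern the tests above repeat, condensed into one helper; the boolean
# argument values are illustrative (the dump replaced them with placeholders),
# and calling this downloads the tiny checkpoint from the Hub.
def run_tiny_gpt2_inference_benchmark():
    benchmark_args = TensorFlowBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'],
        training=False,
        inference=True,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    return TensorFlowBenchmark(benchmark_args).run()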
| 197 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 197 | 1 |
def heaps(arr):
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n, arr):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
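# Sanity check (illustrative): Heap's algorithm enumerates exactly n!
# orderings of its input.
from math import factorial

for _n in range(1, 5):
    assert len(heaps(list(range(_n)))) == factorial(_n)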
| 137 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : int = logging.getLogger(__name__)
a_ : List[str] = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a_ : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A__ )} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _snake_case :
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''The input training data file (a text file).'''} )
_lowercase : Optional[str] = field(
default=A__ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
_lowercase : Optional[str] = field(
default=A__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
_lowercase : bool = field(default=A__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
_lowercase : float = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
_lowercase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
_lowercase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
_lowercase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
_lowercase : bool = field(
default=A__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
def _dataset(_UpperCAmelCase , _UpperCAmelCase=None):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
return LineByLineWithRefDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , ref_path=_UpperCAmelCase , )
return LineByLineTextDataset(tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size)
else:
return TextDataset(
tokenizer=_UpperCAmelCase , file_path=_UpperCAmelCase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=_UpperCAmelCase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file)
elif args.train_data_files:
return ConcatDataset([_dataset(_UpperCAmelCase) for f in glob(args.train_data_files)])
else:
return _dataset(args.train_data_file , args.train_ref_file)
def lowerCamelCase__ ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.')
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.')
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase)
# Set seed
set_seed(training_args.seed)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.')
if model_args.tokenizer_name:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name')
if model_args.model_name_or_path:
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch')
SCREAMING_SNAKE_CASE = AutoModelWithLMHead.from_config(_UpperCAmelCase)
model.resize_token_embeddings(len(_UpperCAmelCase))
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).')
if data_args.block_size <= 0:
SCREAMING_SNAKE_CASE = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
SCREAMING_SNAKE_CASE = min(data_args.block_size , tokenizer.max_len)
# Get datasets
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , cache_dir=model_args.cache_dir) if training_args.do_train else None
)
SCREAMING_SNAKE_CASE = (
get_dataset(_UpperCAmelCase , tokenizer=_UpperCAmelCase , evaluate=_UpperCAmelCase , cache_dir=model_args.cache_dir)
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
SCREAMING_SNAKE_CASE = DataCollatorForPermutationLanguageModeling(
tokenizer=_UpperCAmelCase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(
tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability)
else:
SCREAMING_SNAKE_CASE = DataCollatorForLanguageModeling(
tokenizer=_UpperCAmelCase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
SCREAMING_SNAKE_CASE = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , data_collator=_UpperCAmelCase , train_dataset=_UpperCAmelCase , eval_dataset=_UpperCAmelCase , prediction_loss_only=_UpperCAmelCase , )
# Training
if training_args.do_train:
SCREAMING_SNAKE_CASE = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
else None
)
trainer.train(model_path=_UpperCAmelCase)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info('*** Evaluate ***')
SCREAMING_SNAKE_CASE = trainer.evaluate()
SCREAMING_SNAKE_CASE = math.exp(eval_output['eval_loss'])
SCREAMING_SNAKE_CASE = {'perplexity': perplexity}
SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , 'eval_results_lm.txt')
if trainer.is_world_master():
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Eval results *****')
for key in sorted(result.keys()):
logger.info(' %s = %s' , _UpperCAmelCase , str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
results.update(_UpperCAmelCase)
return results
def lowerCamelCase__ (_UpperCAmelCase):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
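# Footnote on the metric above: perplexity is just the exponentiated mean
# cross-entropy loss, so an eval loss of 2.0 nats corresponds to:
import math

assert round(math.exp(2.0), 4) == 7.3891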
| 137 | 1 |
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    '''simple docstring'''
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
if __name__ == "__main__":
    def f(x):
        '''simple docstring'''
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 10_0000:
        print(F'With {i} steps: {line_length(f, -10, 10, i)}')
        i *= 10
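# Accuracy check (illustrative): the piecewise-linear approximation is exact
# for straight lines, so f(x) = x from 0 to 1 must give sqrt(2).
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9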
| 365 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Dict = {
"configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
"tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :List[str] = [
"LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
"LukeForEntityClassification",
"LukeForEntityPairClassification",
"LukeForEntitySpanClassification",
"LukeForMultipleChoice",
"LukeForQuestionAnswering",
"LukeForSequenceClassification",
"LukeForTokenClassification",
"LukeForMaskedLM",
"LukeModel",
"LukePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowercase__ :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
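# The _LazyModule above defers the heavy torch imports until an attribute is
# first accessed. A minimal, generic sketch of the same idea (this is not the
# transformers implementation, just an illustration):
import importlib
import types


class _LazyDemoModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is requested.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")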
| 97 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ ( nn.Module ):
lowerCamelCase : Optional[Any] = 42
lowerCamelCase : List[Any] = jnp.floataa
def __UpperCAmelCase ( self : Any ) -> Tuple:
lowerCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[Any] , UpperCAmelCase__ : Dict ) -> Dict:
lowerCAmelCase = hidden_states.shape
lowerCAmelCase = jax.image.resize(
a_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
lowerCAmelCase = self.conv(a_ )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
lowerCamelCase : Any = 42
lowerCamelCase : List[Any] = jnp.floataa
def __UpperCAmelCase ( self : Optional[Any] ) -> int:
lowerCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[Any] , UpperCAmelCase__ : Any ) -> Tuple:
lowerCAmelCase = self.conv(a_ )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
lowerCamelCase : Tuple = 42
lowerCamelCase : Any = None
lowerCamelCase : Any = 0.0
lowerCamelCase : Any = None
lowerCamelCase : int = jnp.floataa
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase = self.in_channels if self.out_channels is None else self.out_channels
lowerCAmelCase = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
lowerCAmelCase = nn.Conv(
a_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase = nn.Dense(a_ , dtype=self.dtype )
lowerCAmelCase = nn.GroupNorm(num_groups=3_2 , epsilon=1E-5 )
lowerCAmelCase = nn.Dropout(self.dropout_prob )
lowerCAmelCase = nn.Conv(
a_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCAmelCase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowerCAmelCase = None
if use_nin_shortcut:
lowerCAmelCase = nn.Conv(
a_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int=True ) -> Optional[int]:
lowerCAmelCase = hidden_states
lowerCAmelCase = self.norma(a_ )
lowerCAmelCase = nn.swish(a_ )
lowerCAmelCase = self.conva(a_ )
lowerCAmelCase = self.time_emb_proj(nn.swish(a_ ) )
lowerCAmelCase = jnp.expand_dims(jnp.expand_dims(a_ , 1 ) , 1 )
lowerCAmelCase = hidden_states + temb
lowerCAmelCase = self.norma(a_ )
lowerCAmelCase = nn.swish(a_ )
lowerCAmelCase = self.dropout(a_ , a_ )
lowerCAmelCase = self.conva(a_ )
if self.conv_shortcut is not None:
lowerCAmelCase = self.conv_shortcut(a_ )
return hidden_states + residual
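# The upsample block above relies on jax.image.resize with method="nearest";
# a standalone illustration of that call with toy NHWC shapes:
_x = jnp.arange(4.0).reshape(1, 2, 2, 1)
_y = jax.image.resize(_x, shape=(1, 4, 4, 1), method='nearest')
assert _y.shape == (1, 4, 4, 1)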
| 4 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ,__UpperCamelCase ):
'''simple docstring'''
def snake_case__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = load_tool('''text-to-speech''' )
self.tool.setup()
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = self.tool('''hey''' )
__UpperCAmelCase : Union[str, Any] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
def snake_case__ ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__UpperCAmelCase : Optional[int] = self.tool('''hey''' )
__UpperCAmelCase : List[str] = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5] ) , ) )
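# The torch.manual_seed calls above are what make the TTS output deterministic
# enough to compare raw samples with torch.allclose; the same pattern in
# isolation (values are arbitrary):
if is_torch_available():
    torch.manual_seed(0)
    _a = torch.randn(3)
    torch.manual_seed(0)
    _b = torch.randn(3)
    assert torch.allclose(_a, _b)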
| 226 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
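An illustrative hedged sketch of how the decorator and metaclass combine (the class name and key choice are made up for the example):

@register
class Menu:
    @mark(ord("j"))
    def move_down(cls):
        return "down"

print(Menu.key_handler)  # maps 106 (ord("j")) to move_down; handle_input() dispatches on the next keypress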
| 356 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig
    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table):
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    "You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    "Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
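For context, a brief hedged sketch of how this builder is reached through the public API (file names are illustrative):

from datasets import load_dataset
ds = load_dataset("json", data_files="data.jsonl")                   # one JSON object per line
ds = load_dataset("json", data_files="data.json", field="records")   # records nested under a top-level field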
| 223 | 0 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Naive O(n^3) search over all ordered triplets."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Sort once, then use the two-pointer technique: O(n^2)."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)


def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
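A quick hedged sanity check of both strategies (the inputs are invented for illustration):

print(triplet_sum1([1, 2, 3, 4, 5], 9))  # (1, 3, 5)
print(triplet_sum2([1, 2, 3, 4, 5], 9))  # (1, 3, 5), found in O(n^2) via two pointers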
| 184 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """
    >>> menu = build_menu(["a", "b"], [10, 5], [5, 1])
    >>> items, value = greedy(menu, 5, Things.get_value)
    >>> value
    10.0
    """


if __name__ == "__main__":
    import doctest

    doctest.testmod()
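A short hedged usage example for the greedy selector above (the menu data is made up):

menu = build_menu(["Burger", "Pizza", "Cola"], [80, 100, 30], [40, 45, 10])
chosen, value = greedy(menu, 60, Things.value_weight)
print(chosen, value)  # [Things(Cola, 30, 10), Things(Pizza, 100, 45)] 130.0 — best value/weight ratios first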
| 285 | 0 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
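For intuition, the acceptance rule above is the classic Metropolis criterion: a worse neighbor (negative change) is accepted with probability e**(change / current_temp), so cooling gradually turns exploration into exploitation. A quick hedged illustration using the defaults above (math is already imported in this module):

print(math.exp(-1 / 100))  # ≈ 0.990: at start_temperate=100, a score drop of 1 is almost always accepted
print(math.exp(-1 / 1))    # ≈ 0.368: near threshold_temp=1, the same drop is usually rejected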
if __name__ == "__main__":
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int) -> Optional[Any]:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowercase : List[str] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase : Dict = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
lowercase : Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowercase : Union[str, Any] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[str]) -> int:
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowercase : Any = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase : Optional[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
f"{local_min.score()}"
)
lowercase : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowercase : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
f"{local_min.score()}"
) | 151 |
def and_gate(input_1: int, input_2: int) -> int:
    """Return 1 only if both inputs are 1 (logical AND)."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
assert and_gate(0 , 0) == 0
assert and_gate(0 , 1) == 0
assert and_gate(1 , 0) == 0
assert and_gate(1 , 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 151 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
'''DPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DPTForDepthEstimation''',
'''DPTForSemanticSegmentation''',
'''DPTModel''',
'''DPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
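For orientation, a one-line hedged usage note: with the lazy structure above in place, end users import symbols normally and the heavy submodules only load on first access.

from transformers import DPTConfig, DPTForDepthEstimation  # resolved lazily via _LazyModule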
| 322 | """simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    # (the attribute names below follow the reference TAPAS conversion script; the numeric values are from this file)
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 289 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        class_ids = pipe.get_label_ids(words)

        images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        class_ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
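A condensed hedged example of driving the pipeline outside the test harness (checkpoint name taken from the test above; prompt choice is illustrative):

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
class_ids = pipe.get_label_ids(["white shark"])  # ImageNet label lookup provided by the pipeline
image = pipe(class_ids, num_inference_steps=25, output_type="np").images[0]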
| 82 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
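A brief hedged usage sketch (any of the checkpoints listed above works the same way):

tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
print(tokenizer("hello world")["input_ids"])  # [CLS] hello world [SEP] as vocabulary ids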
| 82 | 1 |
def solution(limit=28123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
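For reference, this computes the sum of all positive integers not expressible as a sum of two abundant numbers (Project Euler problem 23); stated here as a hedged cross-check from the published problem rather than from this file:

# solution() == 4179871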
| 307 |
def solution() -> int:
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
][0]
if __name__ == "__main__":
print(F'{solution() = }')
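For reference (Project Euler problem 9, stated as a cross-check): the unique Pythagorean triple with a + b + c = 1000 is (200, 375, 425), since 200**2 + 375**2 == 425**2, so:

# solution() == 200 * 375 * 425 == 31875000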
| 24 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
    "iterable_length, num_proc, expected_num_proc",
    [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ],
)
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize('input_data' , [{}] )
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'data, expected_output' , [
({}, []),
([], []),
('foo', ['foo']),
(['foo', 'bar'], ['foo', 'bar']),
([['foo', 'bar']], ['foo', 'bar']),
([[['foo'], ['bar']]], ['foo', 'bar']),
([[['foo'], 'bar']], ['foo', 'bar']),
({'a': 1, 'b': 2}, [1, 2]),
({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]),
({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]),
({'a': {'1': 1}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': 2}, [1, 2]),
({'a': {'1': [1]}, 'b': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4 | 355 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A_ : int = logging.get_logger(__name__)
class _lowerCAmelCase(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BICUBIC , _lowerCamelCase = True , _lowerCamelCase = True , _lowerCamelCase = 1 / 2_5_5 , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
UpperCamelCase_: Any = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase_: Any = get_size_dict(_lowerCamelCase )
UpperCamelCase_: Any = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
UpperCamelCase_: List[Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase , param_name='crop_size' )
UpperCamelCase_: Optional[int] = do_resize
UpperCamelCase_: Tuple = do_rescale
UpperCamelCase_: Dict = do_normalize
UpperCamelCase_: Optional[int] = do_center_crop
UpperCamelCase_: Tuple = crop_size
UpperCamelCase_: Optional[int] = size
UpperCamelCase_: Dict = resample
UpperCamelCase_: Tuple = rescale_factor
UpperCamelCase_: Tuple = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase_: Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 292 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCAmelCase_ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : str = AudioClassificationPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
# test with a raw waveform
lowercase_ : Tuple = np.zeros((3_40_00,) )
lowercase_ : str = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ , lowercase_ : List[str] = examples
lowercase_ : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
# by default a model is initialized with num_labels=2
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
lowercase_ : int = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=1 )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
self.run_torchaudio(__SCREAMING_SNAKE_CASE )
@require_torchaudio
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
import datasets
# test with a local file
lowercase_ : Optional[int] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
lowercase_ : Any = dataset[0]['''audio''']['''array''']
lowercase_ : Tuple = audio_classifier(__SCREAMING_SNAKE_CASE )
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
{'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )},
] , )
@require_torch
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : str = '''anton-l/wav2vec2-random-tiny-classifier'''
lowercase_ : str = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = np.ones((80_00,) )
lowercase_ : List[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
lowercase_ : Dict = [
{'''score''': 0.0_842, '''label''': '''no'''},
{'''score''': 0.0_838, '''label''': '''up'''},
{'''score''': 0.0_837, '''label''': '''go'''},
{'''score''': 0.0_834, '''label''': '''right'''},
]
lowercase_ : Optional[int] = [
{'''score''': 0.0_845, '''label''': '''stop'''},
{'''score''': 0.0_844, '''label''': '''on'''},
{'''score''': 0.0_841, '''label''': '''right'''},
{'''score''': 0.0_834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowercase_ : List[str] = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
lowercase_ : Dict = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertIn(nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _snake_case ( self ):
"""simple docstring"""
import datasets
lowercase_ : Any = '''superb/wav2vec2-base-superb-ks'''
lowercase_ : Optional[Any] = pipeline('''audio-classification''' , model=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
lowercase_ : int = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
lowercase_ : Optional[Any] = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=4 )
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def _snake_case ( self ):
"""simple docstring"""
pass
| 93 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __A ( lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase_ = (UnCLIPScheduler,)
def __lowerCamelCase ( self , **__lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = {
'''num_train_timesteps''': 1_0_0_0,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**__lowerCAmelCase )
return config
def __lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__lowerCAmelCase , prev_timestep=__lowerCAmelCase )
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type='''fixed_small_log''' )
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.999_4987 ) ) < 1E-5
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config(variance_type='''learned_range''' )
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
lowerCamelCase__ = 0.5
assert scheduler._get_variance(1 , predicted_variance=__lowerCAmelCase ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(4_8_7 , predicted_variance=__lowerCAmelCase ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(9_9_9 , predicted_variance=__lowerCAmelCase ) - -0.001_0011 < 1E-5
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = self.scheduler_classes[0]
lowerCamelCase__ = self.get_scheduler_config()
lowerCamelCase__ = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(2_5 )
lowerCamelCase__ = scheduler.timesteps
lowerCamelCase__ = self.dummy_model()
lowerCamelCase__ = self.dummy_sample_deter
lowerCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(__lowerCAmelCase ):
# 1. predict noise residual
lowerCamelCase__ = model(__lowerCAmelCase , __lowerCAmelCase )
if i + 1 == timesteps.shape[0]:
lowerCamelCase__ = None
else:
lowerCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ = scheduler.step(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , prev_timestep=__lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowerCamelCase__ = pred_prev_sample
lowerCamelCase__ = torch.sum(torch.abs(__lowerCAmelCase ) )
lowerCamelCase__ = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
def __lowerCamelCase ( self ):
'''simple docstring'''
pass
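# A minimal, self-contained sketch (not part of the original test file) of the
# denoising loop the tests above exercise; the random "model" below is a stand-in
# assumption, not a real UNet.
def _unclip_denoising_sketch():
    scheduler = UnCLIPScheduler(num_train_timesteps=10, variance_type="fixed_small_log")
    sample = torch.randn(1, 3, 32, 32)
    generator = torch.manual_seed(0)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # stand-in for a predicted noise residual
        sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample
    return sample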
| 209 | 0 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = ["audio_values", "audio_mask"]
def __init__(self , __a=20_48 , __a=1 , __a=[16, 16] , __a=1_28 , __a=4_41_00 , __a=86 , __a=20_48 , __a=0.0 , **__a , ) -> Optional[Any]:
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , **__a , )
UpperCamelCase = spectrogram_length
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = feature_size // self.patch_size[1]
UpperCamelCase = n_fft
UpperCamelCase = sampling_rate // hop_length_to_sampling_rate
UpperCamelCase = sampling_rate
UpperCamelCase = padding_value
UpperCamelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__a , norm="slaney" , mel_scale="slaney" , ).T
def snake_case_ (self , __a ) -> np.ndarray:
UpperCamelCase = spectrogram(
__a , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="dB" , db_range=80.0 , )
UpperCamelCase = log_spec[:, :-1]
UpperCamelCase = log_spec - 20.0
UpperCamelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , __a , __a = None , __a = True , __a = None , __a = False , __a = False , **__a , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"This feature extractor is set to support sampling rate"
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
UpperCamelCase = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
UpperCamelCase = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
UpperCamelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
UpperCamelCase = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
UpperCamelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
UpperCamelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __a ):
UpperCamelCase = [np.asarray(__a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
UpperCamelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
UpperCamelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
UpperCamelCase = np.array(__a ).astype(np.floataa )
# convert into correct format for padding
UpperCamelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
UpperCamelCase = np.ones([len(__a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
UpperCamelCase = padded_audio_features * self.padding_value
for i in range(len(__a ) ):
UpperCamelCase = audio_features[i]
UpperCamelCase = feature
# return as BatchFeature
if return_attention_mask:
UpperCamelCase = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
else:
UpperCamelCase = {"audio_values": padded_audio_features}
UpperCamelCase = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
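# A minimal, self-contained sketch (an assumption-level restatement, not the class
# above) of the same log-mel recipe: Hann-window STFT -> "slaney" mel projection ->
# dB scale, then the same clip-and-rescale into roughly [-1, 1].
def _log_mel_sketch(waveform, sampling_rate=44100, n_fft=2048, hop_length=512, num_mel_filters=128):
    mel_filters = mel_filter_bank(
        num_frequency_bins=1 + n_fft // 2,
        num_mel_filters=num_mel_filters,
        min_frequency=0.0,
        max_frequency=22050.0,
        sampling_rate=sampling_rate,
        norm="slaney",
        mel_scale="slaney",
    )
    log_spec = spectrogram(
        waveform,
        window_function(n_fft, "hann"),
        frame_length=n_fft,
        hop_length=hop_length,
        power=2.0,
        mel_filters=mel_filters,
        log_mel="dB",
        db_range=80.0,
    )
    log_spec = log_spec[:, :-1]  # drop the trailing frame, as the extractor above does
    return np.clip((log_spec - 20.0) / 40.0, -2.0, 0.0) + 1.0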
| 244 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = math.inf , _SCREAMING_SNAKE_CASE = -math.inf , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = 0.01 , _SCREAMING_SNAKE_CASE = 1 , ):
"""simple docstring"""
UpperCamelCase = False
UpperCamelCase = search_prob
UpperCamelCase = start_temperate
UpperCamelCase = []
UpperCamelCase = 0
UpperCamelCase = None
while not search_end:
UpperCamelCase = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCamelCase = current_state
scores.append(_SCREAMING_SNAKE_CASE )
iterations += 1
UpperCamelCase = None
UpperCamelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
        ):  # keep sampling neighbors until we find one we can move to
UpperCamelCase = random.randint(0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) # picking a random neighbor
UpperCamelCase = neighbors.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCamelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCamelCase = picked_neighbor
else:
UpperCamelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCamelCase = picked_neighbor
UpperCamelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCamelCase = True
else:
UpperCamelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase__ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    f'''and 50 > y > - 5 found via simulated annealing: {local_min.score()}'''
)
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return (3 * x**2) - (6 * y)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=False, visualization=True)
print(
    '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
lowerCAmelCase__ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase__ = simulated_annealing(prob, find_max=True, visualization=True)
print(
    '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
f'''{local_min.score()}'''
)
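# A minimal sketch of the acceptance rule applied inside the loop above (the
# Metropolis criterion): after the sign flip for minimisation, change > 0 is a
# strict improvement and is always taken, while a worse move is taken with
# probability e^(change / temperature), which shrinks as the system cools.
def _metropolis_accept(change: float, temperature: float) -> bool:
    if change > 0:
        return True
    return random.random() < math.e ** (change / temperature)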
| 244 | 1 |
'''simple docstring'''
from __future__ import annotations
def UpperCAmelCase_ ( __lowercase : list[int] ) -> bool:
'''simple docstring'''
return len(set(__lowercase ) ) == len(__lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
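# Example: the check above returns True only when every element is distinct,
# e.g. [1, 2, 3] -> True and [1, 2, 2] -> False.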
| 22 |
"""simple docstring"""
from __future__ import annotations
def lowerCamelCase__ ( _lowerCamelCase : int ) -> list[int]:
lowerCamelCase_ = [True] * limit
lowerCamelCase_ = False
lowerCamelCase_ = False
lowerCamelCase_ = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
lowerCamelCase_ = i * 2
while index < limit:
lowerCamelCase_ = False
lowerCamelCase_ = index + i
lowerCamelCase_ = [2]
for i in range(3 , _lowerCamelCase , 2 ):
if is_prime[i]:
primes.append(_lowerCamelCase )
return primes
def lowerCamelCase__ ( _lowerCamelCase : int = 1000000 ) -> int:
lowerCamelCase_ = prime_sieve(_lowerCamelCase )
lowerCamelCase_ = 0
lowerCamelCase_ = 0
for i in range(len(_lowerCamelCase ) ):
for j in range(i + length , len(_lowerCamelCase ) ):
lowerCamelCase_ = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
lowerCamelCase_ = j - i
lowerCamelCase_ = sol
return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
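# Worked example (Project Euler 50): below 100 the longest run of consecutive
# primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41 (six terms), and
# below 1000 it is 953 (twenty-one terms), so solution(100) == 41 and
# solution(1000) == 953.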
| 183 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase_ : Optional[int] = None
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ : List[Any] = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
UpperCAmelCase_ : Union[str, Any] = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
UpperCAmelCase_ : Optional[int] = "▁"
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : Any = VOCAB_FILES_NAMES
lowerCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : int = AlbertTokenizer
def __init__( self , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="<unk>" , UpperCAmelCase__="[SEP]" , UpperCAmelCase__="<pad>" , UpperCAmelCase__="[CLS]" , UpperCAmelCase__="[MASK]" , **UpperCAmelCase__ , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text; there should be a match in a non-normalized sentence.
A__ = (
AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ , normalized=lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ )
else mask_token
)
super().__init__(
lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , remove_space=lowerCamelCase_ , keep_accents=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , )
A__ = do_lower_case
A__ = remove_space
A__ = keep_accents
A__ = vocab_file
A__ = False if not self.vocab_file else True
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
A__ = os.path.join(
lowerCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ):
copyfile(self.vocab_file , lowerCamelCase_ )
return (out_vocab_file,)
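# A sketch of the special-token layout the two methods above produce (ALBERT's
# standard format):
#   single sequence: [CLS] A [SEP]           -> token_type_ids all 0
#   sequence pair:   [CLS] A [SEP] B [SEP]   -> 0s over the first segment, 1s over the second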
| 361 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
UpperCAmelCase_ : List[Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = """gpt_neox"""
def __init__( self , UpperCAmelCase__=50_432 , UpperCAmelCase__=6_144 , UpperCAmelCase__=44 , UpperCAmelCase__=64 , UpperCAmelCase__=24_576 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.25 , UpperCAmelCase__=10_000 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.1 , UpperCAmelCase__=2_048 , UpperCAmelCase__=0.02 , UpperCAmelCase__=1e-5 , UpperCAmelCase__=True , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=False , UpperCAmelCase__=True , UpperCAmelCase__=None , **UpperCAmelCase__ , ):
super().__init__(bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
A__ = vocab_size
A__ = max_position_embeddings
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = rotary_pct
A__ = rotary_emb_base
A__ = attention_dropout
A__ = hidden_dropout
A__ = classifier_dropout
A__ = initializer_range
A__ = layer_norm_eps
A__ = use_cache
A__ = tie_word_embeddings
A__ = use_parallel_residual
A__ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisble by the number of attention heads! Make sure to update them!" )
def __A ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , UpperCAmelCase__ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
F"""got {self.rope_scaling}""" )
A__ = self.rope_scaling.get("type" , UpperCAmelCase__ )
A__ = self.rope_scaling.get("factor" , UpperCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 198 | 0 |
'''simple docstring'''
import inspect
import unittest
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
try:
import diffusers # noqa: F401
except ImportError:
assert False
def a__ (self ) -> Any:
"""simple docstring"""
import diffusers
from diffusers.dependency_versions_table import deps
        _a = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
                        backend = '''k-diffusion'''
elif backend == "invisible_watermark":
                        backend = '''invisible-watermark'''
assert backend in deps, f'''{backend} is not in the deps table!'''
| 211 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) != 32:
raise ValueError('''Input must be of length 32''')
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def lowerCAmelCase (__A):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''')
_a = format(__A , '''08x''')[-8:]
_a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''')
return little_endian_hex
def lowerCAmelCase (__A):
"""simple docstring"""
_a = b''''''
for char in message:
bit_string += format(__A , '''08b''').encode('''utf-8''')
_a = format(len(__A) , '''064b''').encode('''utf-8''')
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__A) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
return bit_string
def lowerCAmelCase (__A):
"""simple docstring"""
if len(__A) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''')
for pos in range(0 , len(__A) , 512):
_a = bit_string[pos : pos + 512]
_a = []
for i in range(0 , 512 , 32):
block_words.append(int(to_little_endian(block[i : i + 32]) , 2))
yield block_words
def lowerCAmelCase (__A):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''')
_a = format(__A , '''032b''')
_a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__A , 2)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
return (a + b) % 2**32
def lowerCAmelCase (__A , __A):
"""simple docstring"""
if i < 0:
raise ValueError('''Input must be non-negative''')
if shift < 0:
raise ValueError('''Shift must be non-negative''')
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def lowerCAmelCase (__A):
"""simple docstring"""
_a = preprocess(__A)
_a = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
# Starting states
_a = 0x67_452_301
_a = 0xEF_CDA_B89
_a = 0x98_BAD_CFE
_a = 0x10_325_476
    _a = [
        # Per-round left-rotate amounts (MD5's four rounds of sixteen steps each)
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__A):
_a = aa
_a = ba
_a = ca
_a = da
# Hash current chunk
for i in range(64):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a = d ^ (b & (c ^ d))
_a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a = c ^ (d & (b ^ c))
_a = (5 * i + 1) % 16
elif i <= 47:
_a = b ^ c ^ d
_a = (3 * i + 5) % 16
else:
_a = c ^ (b | not_aa(__A))
_a = (7 * i) % 16
_a = (f + a + added_consts[i] + block_words[g]) % 2**32
_a = d
_a = c
_a = b
_a = sum_aa(__A , left_rotate_aa(__A , shift_amounts[i]))
# Add hashed chunk to running total
_a = sum_aa(__A , __A)
_a = sum_aa(__A , __A)
_a = sum_aa(__A , __A)
_a = sum_aa(__A , __A)
_a = reformat_hex(__A) + reformat_hex(__A) + reformat_hex(__A) + reformat_hex(__A)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
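# Sanity check for the digest function above (kept as a comment because every helper
# in this file shares one obfuscated name): the MD5 of b"hello" is the well-known
# value 5d41402abc4b2a76b9719d911017c592, which can be cross-checked against
# hashlib.md5(b"hello").hexdigest().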
| 211 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
@torch.no_grad()
def __call__( self , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1_00 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
_A = self.unet.config.sample_size / self.unet.config.sample_rate
_A = audio_length_in_s * self.unet.config.sample_rate
_A = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
F''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
_A = int(lowerCAmelCase_ )
if sample_size % down_scale_factor != 0:
_A = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
F''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
""" process.""" )
_A = int(lowerCAmelCase_ )
_A = next(iter(self.unet.parameters() ) ).dtype
_A = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(
F'''You have passed a list of generators of length {len(lowerCAmelCase_ )}, but requested an effective batch'''
F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
_A = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase_ , device=audio.device )
_A = self.scheduler.timesteps.to(lowerCAmelCase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_A = self.unet(lowerCAmelCase_ , lowerCAmelCase_ ).sample
# 2. compute previous image: x_t -> t_t-1
_A = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ).prev_sample
_A = audio.clamp(-1 , 1 ).float().cpu().numpy()
_A = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCAmelCase_ )
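# A minimal usage sketch (assumptions: this is the Dance Diffusion-style
# unconditional audio pipeline, and "harmonai/maestro-150k" is an illustrative
# checkpoint id):
#   from diffusers import DanceDiffusionPipeline
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audios = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0).audios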
| 81 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
_SCREAMING_SNAKE_CASE = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def snake_case ( snake_case__ :List[str] , snake_case__ :Dict) -> str:
_A = {
"""word_embeddings.weight""": """word_embeddings.weight""",
"""word_embeddings.norm.weight""": """word_embeddings_layernorm.weight""",
"""word_embeddings.norm.bias""": """word_embeddings_layernorm.bias""",
"""weight""": """ln_f.weight""",
"""bias""": """ln_f.bias""",
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_A = int(re.match(R""".*layer_(\d*).*""" , snake_case__)[1])
layer_number -= 3
return F'''h.{layer_number}.''' + key
def snake_case ( snake_case__ :Tuple) -> int:
if dtype == torch.bool:
return 1 / 8
_A = re.search(R"""[^\d](\d+)$""" , str(snake_case__))
if bit_search is None:
raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''')
_A = int(bit_search.groups()[0])
return bit_size // 8
def snake_case ( snake_case__ :Dict , snake_case__ :Any , snake_case__ :Union[str, Any] , snake_case__ :Tuple , snake_case__ :List[Any]) -> List[str]:
# Construct model
if bloom_config_file == "":
_A = BloomConfig()
else:
_A = BloomConfig.from_json_file(snake_case__)
if shard_model:
_A = os.listdir(snake_case__)
_A = sorted(filter(lambda snake_case__: s.startswith("""layer""") and "model_00" in s , snake_case__))
_A = {"""weight_map""": {}, """metadata""": {}}
_A = 0
_A = None
_A = BloomConfig()
for j, file in enumerate(snake_case__):
print("""Processing file: {}""".format(snake_case__))
_A = None
for i in range(snake_case__):
# load all TP files
_A = file.replace("""model_00""" , F'''model_0{i}''')
_A = torch.load(os.path.join(snake_case__ , snake_case__) , map_location="""cpu""")
# Rename keys in the transformers names
_A = list(temp.keys())
for key in keys:
_A = temp.pop(snake_case__)
if tensors is None:
_A = temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                        # We concatenate these weights across TP ranks
_A = torch.cat([tensors[key], temp[key]] , dim=snake_case__)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
_A = tensors[key] / pretraining_tp
torch.save(
snake_case__ , os.path.join(
snake_case__ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1).zfill(5) , str(len(snake_case__)).zfill(5)) , ) , )
for key in tensors.keys():
_A = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype)
if key not in index_dict["weight_map"]:
_A = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1).zfill(5) , str(len(snake_case__)).zfill(5))
_A = BloomConfig()
_A = pytorch_dump_folder_path + """/""" + CONFIG_NAME
_A = total_size
with open(snake_case__ , """w""" , encoding="""utf-8""") as f:
f.write(config.to_json_string())
with open(os.path.join(snake_case__ , WEIGHTS_NAME + """.index.json""") , """w""" , encoding="""utf-8""") as f:
_A = json.dumps(snake_case__ , indent=2 , sort_keys=snake_case__) + """\n"""
f.write(snake_case__)
else:
_A = BloomModel(snake_case__)
_A = os.listdir(snake_case__)
_A = sorted(filter(lambda snake_case__: s.startswith("""layer""") and "model_00" in s , snake_case__))
_A = None
for i, file in enumerate(snake_case__):
_A = None
for i in range(snake_case__):
# load all TP files
_A = file.replace("""model_00""" , F'''model_0{i}''')
_A = torch.load(os.path.join(snake_case__ , snake_case__) , map_location="""cpu""")
# Rename keys in the transformers names
_A = list(temp.keys())
for key in keys:
_A = temp.pop(snake_case__)
if tensors is None:
_A = temp
else:
for key in tensors.keys():
                    # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_A = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                        # We concatenate these weights across TP ranks
_A = torch.cat([tensors[key], temp[key]] , dim=snake_case__)
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case__) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
_A = tensors[key] / pretraining_tp
_A = model.load_state_dict(snake_case__ , strict=snake_case__)
assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
_A = set(other_keys.missing_keys)
else:
_A = missing_keys.intersection(set(other_keys.missing_keys))
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(snake_case__ , exist_ok=snake_case__)
_A = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
_A = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''')
if config.torch_dtype is not None:
_A = model.to(config.torch_dtype)
torch.save(model.state_dict() , snake_case__)
print(F'''Save configuration file to {pytorch_config_dump_path}''')
with open(snake_case__ , """w""" , encoding="""utf-8""") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
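# Example invocation (paths are illustrative and the script name is an assumption):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_checkpoints \
#       --pytorch_dump_folder_path ./bloom-converted \
#       --pretraining_tp 4 \
#       --shard_model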
| 81 | 1 |
from __future__ import annotations
from collections.abc import Generator
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : dict[int, int] = {}
UpperCamelCase__ : str = 2
while True:
UpperCamelCase__ : int = factor_map.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if factor:
UpperCamelCase__ : Tuple = factor + prime
while x in factor_map:
x += factor
UpperCamelCase__ : str = factor
else:
UpperCamelCase__ : Any = prime
yield prime
prime += 1
def _a ( SCREAMING_SNAKE_CASE : float = 1E10 ):
"""simple docstring"""
UpperCamelCase__ : Tuple = sieve()
UpperCamelCase__ : List[Any] = 1
while True:
UpperCamelCase__ : int = next(SCREAMING_SNAKE_CASE )
if (2 * prime * n) > limit:
return n
        # Ignore the next prime as the remainder will be 2.
next(SCREAMING_SNAKE_CASE )
n += 2
if __name__ == "__main__":
print(solution())
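# Why 2 * prime * n is the remainder (Project Euler 123): by the binomial theorem,
# ((p - 1) ** n + (p + 1) ** n) % p ** 2 equals 2 * n * p when n is odd and 2 when
# n is even, so only odd n need checking - hence the extra next(...) and n += 2 above.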
| 146 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__UpperCamelCase : Tuple = logging.getLogger(__name__)
__UpperCamelCase : str = tf.data.AUTOTUNE
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : str = argparse.ArgumentParser(description='''Train a masked language model on TPU.''' )
parser.add_argument(
'''--pretrained_model_config''' , type=SCREAMING_SNAKE_CASE , default='''roberta-base''' , help='''The model config to use. Note that we don\'t copy the model\'s weights, only the config!''' , )
parser.add_argument(
'''--tokenizer''' , type=SCREAMING_SNAKE_CASE , default='''unigram-tokenizer-wikitext''' , help='''The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.''' , )
parser.add_argument(
'''--per_replica_batch_size''' , type=SCREAMING_SNAKE_CASE , default=8 , help='''Batch size per TPU core.''' , )
parser.add_argument(
'''--no_tpu''' , action='''store_true''' , help='''If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.''' , )
parser.add_argument(
'''--tpu_name''' , type=SCREAMING_SNAKE_CASE , help='''Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.''' , default='''local''' , )
parser.add_argument(
'''--tpu_zone''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.''' , )
parser.add_argument(
'''--gcp_project''' , type=SCREAMING_SNAKE_CASE , help='''Google cloud project name. Only used for non-Colab TPU nodes.''' )
parser.add_argument(
'''--bfloat16''' , action='''store_true''' , help='''Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.''' , )
parser.add_argument(
'''--train_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to training dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--shuffle_buffer_size''' , type=SCREAMING_SNAKE_CASE , default=2**18 , help='''Size of the shuffle buffer (in samples)''' , )
parser.add_argument(
'''--eval_dataset''' , type=SCREAMING_SNAKE_CASE , help='''Path to evaluation dataset to load. If the path begins with `gs://`'''
''' then the dataset will be loaded from a Google Cloud Storage bucket.''' , )
parser.add_argument(
'''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of epochs to train for.''' , )
parser.add_argument(
'''--learning_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-4 , help='''Learning rate to use for training.''' , )
parser.add_argument(
'''--weight_decay_rate''' , type=SCREAMING_SNAKE_CASE , default=1E-3 , help='''Weight decay rate to use for training.''' , )
parser.add_argument(
'''--max_length''' , type=SCREAMING_SNAKE_CASE , default=512 , help='''Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py''' , )
parser.add_argument(
'''--mlm_probability''' , type=SCREAMING_SNAKE_CASE , default=0.15 , help='''Fraction of tokens to mask during training.''' , )
parser.add_argument('''--output_dir''' , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help='''Path to save model checkpoints to.''' )
parser.add_argument('''--hub_model_id''' , type=SCREAMING_SNAKE_CASE , help='''Model ID to upload to on the Hugging Face Hub.''' )
UpperCamelCase__ : str = parser.parse_args()
return args
def _a ( SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
try:
if args.tpu_name:
UpperCamelCase__ : Optional[Any] = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
UpperCamelCase__ : Any = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'''Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '''
'''--gcp_project. When running on a TPU VM, use --tpu_name local.''' )
tf.config.experimental_connect_to_cluster(SCREAMING_SNAKE_CASE )
tf.tpu.experimental.initialize_tpu_system(SCREAMING_SNAKE_CASE )
return tpu
def _a ( SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
UpperCamelCase__ : List[Any] = 0
for file in file_list:
UpperCamelCase__ : List[str] = file.split('''/''' )[-1]
UpperCamelCase__ : Optional[Any] = re.search(r'''-\d+-(\d+)\.tfrecord''' , SCREAMING_SNAKE_CASE ).group(1 )
UpperCamelCase__ : List[Any] = int(SCREAMING_SNAKE_CASE )
num_samples += sample_count
return num_samples
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any=None ):
"""simple docstring"""
UpperCamelCase__ : int = count_samples(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = tf.data.Dataset.from_tensor_slices(SCREAMING_SNAKE_CASE )
if shuffle:
UpperCamelCase__ : Any = dataset.shuffle(len(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Tuple = tf.data.TFRecordDataset(SCREAMING_SNAKE_CASE , num_parallel_reads=SCREAMING_SNAKE_CASE )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
UpperCamelCase__ : Union[str, Any] = dataset.apply(tf.data.experimental.assert_cardinality(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : List[Any] = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
if shuffle:
assert shuffle_buffer_size is not None
UpperCamelCase__ : Dict = dataset.shuffle(args.shuffle_buffer_size )
UpperCamelCase__ : int = dataset.batch(SCREAMING_SNAKE_CASE , drop_remainder=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = dataset.map(SCREAMING_SNAKE_CASE , num_parallel_calls=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = dataset.prefetch(SCREAMING_SNAKE_CASE )
return dataset
def _a ( SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if not args.no_tpu:
UpperCamelCase__ : List[Any] = initialize_tpu(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = tf.distribute.TPUStrategy(SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ : Any = tf.distribute.OneDeviceStrategy(device='''/gpu:0''' )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('''mixed_bfloat16''' )
UpperCamelCase__ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer )
UpperCamelCase__ : List[Any] = AutoConfig.from_pretrained(args.pretrained_model_config )
UpperCamelCase__ : Dict = tokenizer.vocab_size
UpperCamelCase__ : int = tf.io.gfile.glob(os.path.join(args.train_dataset , '''*.tfrecord''' ) )
if not training_records:
raise ValueError(F"No .tfrecord files found in {args.train_dataset}." )
UpperCamelCase__ : int = tf.io.gfile.glob(os.path.join(args.eval_dataset , '''*.tfrecord''' ) )
if not eval_records:
raise ValueError(F"No .tfrecord files found in {args.eval_dataset}." )
UpperCamelCase__ : str = count_samples(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
UpperCamelCase__ : List[Any] = steps_per_epoch * args.num_epochs
with strategy.scope():
UpperCamelCase__ : List[str] = TFAutoModelForMaskedLM.from_config(SCREAMING_SNAKE_CASE )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
UpperCamelCase__ , UpperCamelCase__ : int = create_optimizer(
num_train_steps=SCREAMING_SNAKE_CASE , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=SCREAMING_SNAKE_CASE , metrics=['''accuracy'''] )
def decode_fn(SCREAMING_SNAKE_CASE : Optional[Any] ):
UpperCamelCase__ : Tuple = {
            '''input_ids''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            '''attention_mask''': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
UpperCamelCase__ : Optional[int] = DataCollatorForLanguageModeling(
tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=args.mlm_probability , mlm=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
def mask_with_collator(SCREAMING_SNAKE_CASE : int ):
# TF really needs an isin() function
UpperCamelCase__ : Optional[int] = (
~tf.cast(batch['''attention_mask'''] , tf.bool )
| (batch['''input_ids'''] == tokenizer.cls_token_id)
| (batch['''input_ids'''] == tokenizer.sep_token_id)
)
UpperCamelCase__ , UpperCamelCase__ : List[str] = data_collator.tf_mask_tokens(
batch['''input_ids'''] , vocab_size=len(SCREAMING_SNAKE_CASE ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=SCREAMING_SNAKE_CASE , )
return batch
UpperCamelCase__ : Tuple = args.per_replica_batch_size * strategy.num_replicas_in_sync
UpperCamelCase__ : List[str] = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , shuffle_buffer_size=args.shuffle_buffer_size , )
UpperCamelCase__ : Optional[int] = prepare_dataset(
SCREAMING_SNAKE_CASE , decode_fn=SCREAMING_SNAKE_CASE , mask_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , shuffle=SCREAMING_SNAKE_CASE , )
UpperCamelCase__ : List[Any] = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=SCREAMING_SNAKE_CASE ) )
model.fit(
SCREAMING_SNAKE_CASE , validation_data=SCREAMING_SNAKE_CASE , epochs=args.num_epochs , callbacks=SCREAMING_SNAKE_CASE , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__UpperCamelCase : Dict = parse_args()
main(args)
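# Example invocation (paths and the script name are illustrative):
#   python train_mlm_on_tpu.py \
#       --train_dataset gs://my-bucket/wikitext/train/ \
#       --eval_dataset gs://my-bucket/wikitext/eval/ \
#       --tokenizer unigram-tokenizer-wikitext \
#       --output_dir ./mlm-checkpoints \
#       --tpu_name local --bfloat16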
| 146 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : str = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Optional[Any] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : str = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 331 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = (DDPMScheduler,)
def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
"""simple docstring"""
        config = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__SCREAMING_SNAKE_CASE )
return config
def UpperCAmelCase__ ( self : str ) -> str:
"""simple docstring"""
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> int:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
if i == len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
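# A minimal sketch of the custom-timesteps API the tests above exercise:
def _custom_timesteps_sketch():
    scheduler = DDPMScheduler()
    scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])  # must be strictly descending
    return scheduler.timesteps  # tensor([100, 87, 50, 1, 0])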
| 331 | 1 |
def factorial( digit : int ) -> int:
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy( number : int ) -> bool:
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
print("""Program to check whether a number is a Krisnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
print(
F"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
)
| 95 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( _a , unittest.TestCase ):
'''simple docstring'''
a__ = LongformerTokenizer
a__ = True
a__ = LongformerTokenizerFast
a__ = True
def lowerCAmelCase_ (self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__UpperCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__UpperCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__UpperCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__UpperCAmelCase = {'''unk_token''': '''<unk>'''}
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowercase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowercase__ ) )
def lowerCAmelCase_ (self , **lowercase__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , **lowercase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ )
def lowerCAmelCase_ (self , lowercase__ ) -> Dict:
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = '''lower newer'''
return input_text, output_text
def lowerCAmelCase_ (self ) -> Optional[Any]:
__UpperCAmelCase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCAmelCase = '''lower newer'''
__UpperCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__UpperCAmelCase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True)
self.assertListEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokens + [tokenizer.unk_token]
__UpperCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) , lowercase__ )
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=lowercase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
@slow
def lowerCAmelCase_ (self ) -> int:
__UpperCAmelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__UpperCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ )
__UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(lowercase__ , lowercase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowerCAmelCase_ (self ) -> Any:
__UpperCAmelCase = self.get_tokenizer()
__UpperCAmelCase = '''Encode this sequence.'''
__UpperCAmelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ , add_prefix_space=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowercase__ , lowercase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__UpperCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
# Testing spaces after special tokens
__UpperCAmelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ )} ) # mask token has a left space
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
__UpperCAmelCase = '''Encode <mask> sequence'''
__UpperCAmelCase = '''Encode <mask>sequence'''
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = tokenizer.encode(lowercase__ )
__UpperCAmelCase = encoded.index(lowercase__ )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowercase__ , lowercase__ )
def lowerCAmelCase_ (self ) -> Tuple:
pass
def lowerCAmelCase_ (self ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ )
__UpperCAmelCase = '''A, <mask> AllenNLP sentence.'''
__UpperCAmelCase = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
__UpperCAmelCase = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowerCAmelCase_ (self ) -> Optional[int]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__UpperCAmelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , lowercase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , lowercase__ )
def lowerCAmelCase_ (self ) -> Union[str, Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCAmelCase = F'''{text_of_1_token} {text_of_1_token}'''
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
lowercase__ , use_fast=lowercase__ , add_prefix_space=lowercase__ , trim_offsets=lowercase__ )
__UpperCAmelCase = tokenizer_r(lowercase__ , return_offsets_mapping=lowercase__ , add_special_tokens=lowercase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) , )
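if __name__ == "__main__":
    # A minimal arithmetic sketch of the two offset conventions asserted above
    # for "hello hello" (token length 5, one separating space): with trimmed
    # offsets the second span skips the leading space, untrimmed keeps it.
    token = "hello"
    trimmed = [(0, len(token)), (len(token) + 1, 2 * len(token) + 1)]
    untrimmed = [(0, len(token)), (len(token), 2 * len(token) + 1)]
    assert trimmed == [(0, 5), (6, 11)] and untrimmed == [(0, 5), (5, 11)]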
| 333 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class __A ( unittest.TestCase ):
'''simple docstring'''
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory ,file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier ,list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('''__init__.py''' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('''Testing''' ,file )
            if only_modules:
                module_identifier = file.split('''.''' )[0]
                try:
                    module_identifier = getattr(transformers ,module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str(Path('''..''' ) / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )
    def UpperCAmelCase ( self : Dict ) -> Dict:
        """simple docstring"""
        directory = Path('''src/transformers''' )
        identifier = '''modeling'''
        ignore_files = [
            '''modeling_ctrl.py''',
            '''modeling_tf_ctrl.py''',
        ]
        self.analyze_directory(directory ,identifier=identifier ,ignore_files=ignore_files )
    def UpperCAmelCase ( self : List[Any] ) -> Any:
        """simple docstring"""
        directory = Path('''src/transformers''' )
        identifier = '''tokenization'''
        self.analyze_directory(directory ,identifier=identifier )
    def UpperCAmelCase ( self : str ) -> List[str]:
        """simple docstring"""
        directory = Path('''src/transformers''' )
        identifier = '''configuration'''
        self.analyze_directory(directory ,identifier=identifier )
    def UpperCAmelCase ( self : List[Any] ) -> Any:
        """simple docstring"""
        directory = Path('''src/transformers''' )
        n_identifiers = ['''configuration''', '''modeling''', '''tokenization''']
        self.analyze_directory(directory ,n_identifier=n_identifiers )
    def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
        """simple docstring"""
        directory = Path('''docs/source''' )
        ignore_files = ['''favicon.ico''']
        self.analyze_directory(directory ,ignore_files=ignore_files ,only_modules=False )
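if __name__ == "__main__":
    # A minimal, self-contained sketch of the doctest machinery used above:
    # DocTestSuite collects `>>>` examples from a module and runs them through
    # the standard unittest runner (the demo module here is synthetic).
    import types

    demo = types.ModuleType("demo")
    demo.__doc__ = """
    >>> 1 + 1
    2
    """
    suite = doctest.DocTestSuite(demo)
    result = unittest.TextTestRunner().run(suite)
    assert len(result.failures) == 0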
| 357 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : int ,_snake_case : int ,_snake_case : Union[int, Tuple[int, int]] ,_snake_case : Union[int, Tuple[int, int], str] = 0 ,_snake_case : bool = False ,_snake_case : Union[int, Tuple[int, int]] = 1 ,) -> None:
"""simple docstring"""
super().__init__()
        lowercase__ : Optional[int] = nn.Conv2d(
            in_channels=_snake_case ,out_channels=_snake_case ,kernel_size=_snake_case ,padding=_snake_case ,bias=_snake_case ,dilation=_snake_case ,)
        lowercase__ : Tuple = nn.BatchNorm2d(_snake_case )
lowercase__ : List[str] = nn.ReLU()
def UpperCAmelCase ( self : str ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.conv(_snake_case )
lowercase__ : List[str] = self.batch_norm(_snake_case )
lowercase__ : Tuple = self.activation(_snake_case )
return output
class UperNetPyramidPoolingBlock( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : int ,_snake_case : int ,_snake_case : int ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : List[Any] = [
            nn.AdaptiveAvgPool2d(_snake_case ),
UperNetConvModule(_snake_case ,_snake_case ,kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Dict ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Any = input
for layer in self.layers:
lowercase__ : int = layer(_snake_case )
return hidden_state
class UperNetPyramidPoolingModule( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,_snake_case : Tuple[int, ...] ,_snake_case : int ,_snake_case : int ,_snake_case : bool ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Optional[Any] = in_channels
lowercase__ : Optional[Any] = channels
lowercase__ : int = []
for i, pool_scale in enumerate(_snake_case ):
lowercase__ : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=_snake_case ,in_channels=_snake_case ,channels=_snake_case )
self.blocks.append(_snake_case )
self.add_module(str(_snake_case ) ,_snake_case )
def UpperCAmelCase ( self : Any ,_snake_case : torch.Tensor ) -> List[torch.Tensor]:
"""simple docstring"""
lowercase__ : int = []
for ppm in self.blocks:
lowercase__ : Any = ppm(_snake_case )
lowercase__ : int = nn.functional.interpolate(
_snake_case ,size=x.size()[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
ppm_outs.append(_snake_case )
return ppm_outs
class UperNetHead( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Union[str, Any] ) -> str:
"""simple docstring"""
super().__init__()
lowercase__ : str = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : Optional[Any] = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Optional[Any] = False
        lowercase__ : Optional[int] = nn.Conv2d(self.channels ,config.num_labels ,kernel_size=1 )
# PSP Module
lowercase__ : Dict = UperNetPyramidPoolingModule(
self.pool_scales ,self.in_channels[-1] ,self.channels ,align_corners=self.align_corners ,)
lowercase__ : str = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
# FPN Module
lowercase__ : Any = nn.ModuleList()
lowercase__ : Union[str, Any] = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : List[Any] = UperNetConvModule(_snake_case ,self.channels ,kernel_size=1 )
lowercase__ : Optional[int] = UperNetConvModule(self.channels ,self.channels ,kernel_size=3 ,padding=1 )
self.lateral_convs.append(_snake_case )
self.fpn_convs.append(_snake_case )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels ,self.channels ,kernel_size=3 ,padding=1 ,)
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
        if isinstance(_snake_case ,nn.Conv2d ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : Optional[Any] ,_snake_case : Optional[Any] ) -> str:
"""simple docstring"""
lowercase__ : Dict = inputs[-1]
lowercase__ : Optional[int] = [x]
psp_outs.extend(self.psp_modules(_snake_case ) )
lowercase__ : Optional[Any] = torch.cat(_snake_case ,dim=1 )
lowercase__ : List[str] = self.bottleneck(_snake_case )
return output
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_snake_case ) )
# build top-down path
lowercase__ : List[Any] = len(_snake_case )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Union[str, Any] = laterals[i - 1].shape[2:]
lowercase__ : int = laterals[i - 1] + nn.functional.interpolate(
laterals[i] ,size=_snake_case ,mode='''bilinear''' ,align_corners=self.align_corners )
# build outputs
lowercase__ : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 ,0 ,-1 ):
lowercase__ : Any = nn.functional.interpolate(
fpn_outs[i] ,size=fpn_outs[0].shape[2:] ,mode='''bilinear''' ,align_corners=self.align_corners )
lowercase__ : Any = torch.cat(_snake_case ,dim=1 )
lowercase__ : Any = self.fpn_bottleneck(_snake_case )
lowercase__ : str = self.classifier(_snake_case )
return output
class UperNetFCNHead( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_snake_case : List[Any] ,_snake_case : int = 2 ,_snake_case : int = 3 ,_snake_case : Union[int, Tuple[int, int]] = 1 ) -> None:
"""simple docstring"""
super().__init__()
lowercase__ : int = config
lowercase__ : Dict = config.auxiliary_in_channels
lowercase__ : Optional[int] = config.auxiliary_channels
lowercase__ : List[Any] = config.auxiliary_num_convs
lowercase__ : List[Any] = config.auxiliary_concat_input
lowercase__ : str = in_index
lowercase__ : Any = (kernel_size // 2) * dilation
lowercase__ : Optional[Any] = []
convs.append(
UperNetConvModule(
self.in_channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels ,self.channels ,kernel_size=_snake_case ,padding=_snake_case ,dilation=_snake_case ) )
if self.num_convs == 0:
lowercase__ : List[str] = nn.Identity()
else:
lowercase__ : Dict = nn.Sequential(*_snake_case )
if self.concat_input:
lowercase__ : int = UperNetConvModule(
self.in_channels + self.channels ,self.channels ,kernel_size=_snake_case ,padding=kernel_size // 2 )
            lowercase__ : List[str] = nn.Conv2d(self.channels ,config.num_labels ,kernel_size=1 )
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.apply(self._init_weights )
def UpperCAmelCase ( self : List[Any] ,_snake_case : List[Any] ) -> Dict:
"""simple docstring"""
        if isinstance(_snake_case ,nn.Conv2d ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def UpperCAmelCase ( self : List[str] ,_snake_case : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowercase__ : str = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(_snake_case )
if self.concat_input:
lowercase__ : Any = self.conv_cat(torch.cat([hidden_states, output] ,dim=1 ) )
lowercase__ : Dict = self.classifier(_snake_case )
return output
class UperNetPreTrainedModel( PreTrainedModel ):
'''simple docstring'''
lowerCAmelCase : Any = UperNetConfig
lowerCAmelCase : str = "pixel_values"
lowerCAmelCase : Dict = True
def UpperCAmelCase ( self : int ,_snake_case : str ) -> Optional[int]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def UpperCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def UpperCAmelCase ( self : int ,_snake_case : str ,_snake_case : str=False ) -> List[str]:
"""simple docstring"""
if isinstance(_snake_case ,_snake_case ):
lowercase__ : List[Any] = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." ,UPERNET_START_DOCSTRING ,)
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_snake_case : Tuple ) -> int:
"""simple docstring"""
super().__init__(_snake_case )
lowercase__ : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : Any = UperNetHead(_snake_case ,in_channels=self.backbone.channels )
lowercase__ : str = UperNetFCNHead(_snake_case ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput ,config_class=_CONFIG_FOR_DOC )
def UpperCAmelCase ( self : Dict ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[bool] = None ,_snake_case : Optional[torch.Tensor] = None ,_snake_case : Optional[bool] = None ,) -> Union[tuple, SemanticSegmenterOutput]:
"""simple docstring"""
lowercase__ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Optional[Any] = self.backbone.forward_with_filtered_kwargs(
_snake_case ,output_hidden_states=_snake_case ,output_attentions=_snake_case )
lowercase__ : Optional[int] = outputs.feature_maps
lowercase__ : Tuple = self.decode_head(_snake_case )
lowercase__ : Optional[int] = nn.functional.interpolate(_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : List[str] = None
if self.auxiliary_head is not None:
lowercase__ : str = self.auxiliary_head(_snake_case )
lowercase__ : Dict = nn.functional.interpolate(
_snake_case ,size=pixel_values.shape[2:] ,mode='''bilinear''' ,align_corners=_snake_case )
lowercase__ : Any = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
lowercase__ : Union[str, Any] = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : List[str] = loss_fct(_snake_case ,_snake_case )
lowercase__ : Optional[Any] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : Tuple = (logits,) + outputs[1:]
else:
lowercase__ : int = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_snake_case ,logits=_snake_case ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
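if __name__ == "__main__":
    # Minimal shape sketch of the pyramid pooling used above (sizes are assumed
    # examples): each scale pools the feature map to (s, s) and is upsampled
    # back to the input resolution before channel-wise fusion.
    feats = torch.randn(1, 8, 32, 32)
    pooled = [nn.functional.adaptive_avg_pool2d(feats, s) for s in (1, 2, 3, 6)]
    upsampled = [
        nn.functional.interpolate(p, size=feats.shape[2:], mode="bilinear", align_corners=False)
        for p in pooled
    ]
    fused = torch.cat([feats, *upsampled], dim=1)
    assert fused.shape == (1, 40, 32, 32)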
| 302 | 0 |
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
lowercase__ = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig( PretrainedConfig ):
'''simple docstring'''
a_ : str = """esm"""
def __init__( self : Union[str, Any] , a_ : int=None , a_ : List[str]=None , a_ : Optional[int]=None , a_ : Optional[int]=7_68 , a_ : List[Any]=12 , a_ : List[str]=12 , a_ : Optional[Any]=30_72 , a_ : Optional[Any]=0.1 , a_ : Tuple=0.1 , a_ : Union[str, Any]=10_26 , a_ : List[str]=0.02 , a_ : Optional[int]=1e-1_2 , a_ : int="absolute" , a_ : Union[str, Any]=True , a_ : int=None , a_ : int=False , a_ : Optional[Any]=False , a_ : Any=None , a_ : List[str]=None , **a_ : int , ):
super().__init__(pad_token_id=a_ , mask_token_id=a_ , **a_ )
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : str = num_attention_heads
lowerCAmelCase_ : Tuple = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Dict = attention_probs_dropout_prob
lowerCAmelCase_ : Dict = max_position_embeddings
lowerCAmelCase_ : Optional[Any] = initializer_range
lowerCAmelCase_ : Tuple = layer_norm_eps
lowerCAmelCase_ : Dict = position_embedding_type
lowerCAmelCase_ : str = use_cache
lowerCAmelCase_ : str = emb_layer_norm_before
lowerCAmelCase_ : Any = token_dropout
lowerCAmelCase_ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowerCAmelCase_ : int = EsmFoldConfig()
elif isinstance(a_ , a_ ):
lowerCAmelCase_ : int = EsmFoldConfig(**a_ )
lowerCAmelCase_ : Tuple = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowerCAmelCase_ : Any = get_default_vocab_list()
else:
lowerCAmelCase_ : Optional[Any] = vocab_list
else:
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Tuple = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , a_ ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[str] = super().to_dict()
if isinstance(self.esmfold_config , a_ ):
lowerCAmelCase_ : int = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
'''simple docstring'''
a_ : str = None
a_ : bool = True
a_ : bool = False
a_ : bool = False
a_ : bool = False
a_ : float = 0
a_ : bool = True
a_ : bool = False
a_ : int = 128
a_ : "TrunkConfig" = None
def lowerCamelCase ( self : str ):
if self.trunk is None:
lowerCAmelCase_ : List[Any] = TrunkConfig()
elif isinstance(self.trunk , a_ ):
lowerCAmelCase_ : str = TrunkConfig(**self.trunk )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Any = asdict(self )
lowerCAmelCase_ : int = self.trunk.to_dict()
return output
@dataclass
class TrunkConfig:
'''simple docstring'''
a_ : int = 48
a_ : int = 1024
a_ : int = 128
a_ : int = 32
a_ : int = 32
a_ : int = 32
a_ : float = 0
a_ : float = 0
a_ : bool = False
a_ : int = 4
a_ : Optional[int] = 128
a_ : "StructureModuleConfig" = None
def lowerCamelCase ( self : Optional[Any] ):
if self.structure_module is None:
lowerCAmelCase_ : Any = StructureModuleConfig()
elif isinstance(self.structure_module , a_ ):
lowerCAmelCase_ : Union[str, Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowerCAmelCase_ : List[str] = self.sequence_state_dim // self.sequence_head_width
lowerCAmelCase_ : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(f'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = asdict(self )
lowerCAmelCase_ : str = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
'''simple docstring'''
a_ : int = 384
a_ : int = 128
a_ : int = 16
a_ : int = 128
a_ : int = 12
a_ : int = 4
a_ : int = 8
a_ : float = 0.1
a_ : int = 8
a_ : int = 1
a_ : int = 2
a_ : int = 7
a_ : int = 10
a_ : float = 1E-8
a_ : float = 1E5
def lowerCamelCase ( self : Optional[int] ):
return asdict(self )
def get_default_vocab_list() -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
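if __name__ == "__main__":
    # Minimal sketch of the head-width consistency rule enforced in TrunkConfig
    # above, using assumed defaults (1024-dim sequence state, width-32 heads):
    sequence_state_dim, sequence_head_width = 1024, 32
    sequence_num_heads = sequence_state_dim // sequence_head_width
    assert sequence_state_dim == sequence_num_heads * sequence_head_width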
| 241 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] = IFInpaintingSuperResolutionPipeline
a_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
a_ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} )
a_ : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def lowerCamelCase ( self : Optional[Any] ):
return self._get_superresolution_dummy_components()
def lowerCamelCase ( self : Optional[Any] , a_ : List[str] , a_ : Union[str, Any]=0 ):
if str(a_ ).startswith("mps" ):
lowerCAmelCase_ : List[Any] = torch.manual_seed(a_ )
else:
lowerCAmelCase_ : str = torch.Generator(device=a_ ).manual_seed(a_ )
lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(a_ ) ).to(a_ )
lowerCAmelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCAmelCase_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
lowerCAmelCase_ : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowerCamelCase ( self : Optional[int] ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCamelCase ( self : Optional[Any] ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
def lowerCamelCase ( self : Tuple ):
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowerCamelCase ( self : List[str] ):
self._test_save_load_local()
def lowerCamelCase ( self : Optional[int] ):
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
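if __name__ == "__main__":
    # Minimal sketch of the seeded-generator pattern in get_dummy_inputs above:
    # identical seeds must reproduce identical tensors for deterministic tests.
    g_a = torch.Generator(device="cpu").manual_seed(0)
    g_b = torch.Generator(device="cpu").manual_seed(0)
    assert torch.equal(torch.randn(2, 2, generator=g_a), torch.randn(2, 2, generator=g_b))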
| 241 | 1 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig( PretrainedConfig ):
A_ : Dict = 'vision-encoder-decoder'
A_ : Dict = True
def __init__( self , **_SCREAMING_SNAKE_CASE ):
super().__init__(**_SCREAMING_SNAKE_CASE )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f"A configuraton of type {self.model_type} cannot be instantiated because "
f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}" )
__lowerCAmelCase : Dict = kwargs.pop('encoder' )
__lowerCAmelCase : Tuple = encoder_config.pop('model_type' )
__lowerCAmelCase : Dict = kwargs.pop('decoder' )
__lowerCAmelCase : List[Any] = decoder_config.pop('model_type' )
__lowerCAmelCase : List[Any] = AutoConfig.for_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = AutoConfig.for_model(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = True
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__lowerCAmelCase : Dict = True
__lowerCAmelCase : List[str] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = copy.deepcopy(self.__dict__ )
__lowerCAmelCase : List[Any] = self.encoder.to_dict()
__lowerCAmelCase : Dict = self.decoder.to_dict()
__lowerCAmelCase : int = self.__class__.model_type
return output
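if __name__ == "__main__":
    # Minimal sketch (the model types are assumed examples): AutoConfig.for_model
    # is what __init__ above uses to rebuild each sub-config from its dict.
    enc = AutoConfig.for_model("vit")
    dec = AutoConfig.for_model("gpt2")
    assert enc.model_type == "vit" and dec.model_type == "gpt2"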
class VisionEncoderDecoderEncoderOnnxConfig( OnnxConfig ):
A_ : Optional[int] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 1E-4
@property
def __lowerCamelCase ( self ):
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class VisionEncoderDecoderDecoderOnnxConfig( OnnxConfig ):
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = OrderedDict()
__lowerCAmelCase : List[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__lowerCAmelCase : List[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__lowerCAmelCase : Dict = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
import torch
__lowerCAmelCase : List[str] = OrderedDict()
__lowerCAmelCase : Any = super().generate_dummy_inputs(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
        batch , encoder_sequence = dummy_input['input_ids'].shape
__lowerCAmelCase : Union[str, Any] = (batch, encoder_sequence, self._config.encoder_hidden_size)
__lowerCAmelCase : Optional[Any] = dummy_input.pop('input_ids' )
__lowerCAmelCase : Optional[int] = dummy_input.pop('attention_mask' )
__lowerCAmelCase : List[Any] = torch.zeros(_SCREAMING_SNAKE_CASE )
return common_inputs
class VisionEncoderDecoderOnnxConfig( OnnxConfig ):
@property
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return VisionEncoderDecoderEncoderOnnxConfig(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "default" ):
__lowerCAmelCase : Any = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 366 |
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        '0': 'Sunday',
        '1': 'Monday',
        '2': 'Tuesday',
        '3': 'Wednesday',
        '4': 'Thursday',
        '5': 'Friday',
        '6': 'Saturday',
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
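# Quick sanity check with an assumed example date: 2000-01-01 fell on a
# Saturday, and zeller() also validates its own math against datetime.
assert zeller("01-01-2000").endswith("Saturday!")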
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
lowerCamelCase__ = parser.parse_args()
    zeller(args.date_input)
| 182 | 0 |
import torch
def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'Successfully ran on {num_gpus} GPUs' )
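# Note: torch.cuda.device_count() already returns 0 on CPU-only machines, so
# the explicit is_available() branch above is defensive rather than required.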
if __name__ == "__main__":
main()
| 131 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
def __init__( self : Optional[Any] , snake_case_ : Tuple , snake_case_ : Dict=13 , snake_case_ : Optional[Any]=32 , snake_case_ : List[Any]=3 , snake_case_ : Dict=4 , snake_case_ : Tuple=[10, 20, 30, 40] , snake_case_ : int=[2, 2, 3, 2] , snake_case_ : Union[str, Any]=True , snake_case_ : Optional[int]=True , snake_case_ : Union[str, Any]=37 , snake_case_ : Any="gelu" , snake_case_ : Union[str, Any]=10 , snake_case_ : str=0.02 , snake_case_ : str=["stage2", "stage3", "stage4"] , snake_case_ : str=3 , snake_case_ : List[Any]=None , ) -> Optional[Any]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = image_size
A__ = num_channels
A__ = num_stages
A__ = hidden_sizes
A__ = depths
A__ = is_training
A__ = use_labels
A__ = intermediate_size
A__ = hidden_act
A__ = type_sequence_label_size
A__ = initializer_range
A__ = out_features
A__ = num_labels
A__ = scope
A__ = num_stages
def __magic_name__ ( self : str ) -> Tuple:
'''simple docstring'''
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Optional[int] ) -> int:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def __magic_name__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=512 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=snake_case_ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=snake_case_ , loss_ignore_index=255 , num_labels=self.num_labels , )
def __magic_name__ ( self : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
A__ = UperNetForSemanticSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
A__ = model(snake_case_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def __magic_name__ ( self : Any ) -> Optional[Any]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest( ModelTesterMixin, PipelineTesterMixin, unittest.TestCase ):
lowercase__ = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowercase__ = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def __magic_name__ ( self : int ) -> int:
'''simple docstring'''
A__ = UperNetModelTester(self )
A__ = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def __magic_name__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return
def __magic_name__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(snake_case_ )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __magic_name__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case_ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def __magic_name__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def __magic_name__ ( self : Tuple ) -> Dict:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __magic_name__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __magic_name__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
def __magic_name__ ( self : List[Any] ) -> str:
'''simple docstring'''
def check_hidden_states_output(snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
A__ = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__, A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = _config_zero_init(snake_case_ )
A__ = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
A__ = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
pass
@slow
def __magic_name__ ( self : Any ) -> str:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = UperNetForSemanticSegmentation.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest( unittest.TestCase ):
def __magic_name__ ( self : int ) -> List[Any]:
'''simple docstring'''
A__ = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
A__ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(snake_case_ )
A__ = prepare_img()
A__ = processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
with torch.no_grad():
A__ = model(**snake_case_ )
A__ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1e-4 ) )
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
A__ = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
A__ = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(snake_case_ )
A__ = prepare_img()
A__ = processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
with torch.no_grad():
A__ = model(**snake_case_ )
A__ = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape , snake_case_ )
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case_ , atol=1e-4 ) )
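if __name__ == "__main__":
    # Minimal sketch of the tolerance pattern used in the logits slices above
    # (assumes torch is installed, since its import above is conditional):
    expected_slice = torch.tensor([-7.5958, -7.5958, -7.4302])
    assert torch.allclose(expected_slice, expected_slice + 5e-5, atol=1e-4)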
| 247 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"""The `inpainting.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionInpaintPipeline` instead."""
)
| 370 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
'''simple docstring'''
a : List[str] = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
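# timm fuses the query/key/value projections of each transformer block into a single
# qkv matrix; the HF implementation keeps them separate, so read_in_q_k_v below slices
# the fused weight and bias back into query, key and value entries of the state dict.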
def read_in_q_k_v(state_dict, config, base_model=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    '''simple docstring'''
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
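# The conversion below builds the HF config, remaps the timm state dict with the helpers
# above, and then checks that the converted model reproduces timm's pixel values and
# logits on a test image before anything is saved.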
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
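# Example invocation (paths are illustrative, not part of the original script):
#   python convert_vit_hybrid_timm_to_pytorch.py --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384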
| 226 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n            """.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
        assert Path(output_file_name).exists()
        # os.remove(Path(output_file_name))
    @parameterized.expand([T5_TINY])
    def test_run_eval(self, model):
        '''simple docstring'''
        self.run_eval_tester(model)
    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        '''simple docstring'''
        self.run_eval_tester(model)
    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        '''simple docstring'''
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }

        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f"""\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n            """.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
        expected_strings = [" num_beams | length_penalty", model, "Best score args"]
        un_expected_strings = ["Info"]
        if "translation" in task:
            expected_strings.append("bleu")
        else:
            expected_strings.extend(ROUGE_KEYS)
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(output_file_name).exists()
        os.remove(Path(output_file_name))
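# These tests are meant to be collected by pytest from the seq2seq examples directory,
# e.g. `pytest -k run_eval` (invocation shown for illustration only).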
| 182 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 0 |
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """simple docstring"""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    """simple docstring"""

    @staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
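# This command plugs into the `transformers-cli` entry point; a typical call
# (illustrative) is: transformers-cli download bert-base-uncased --cache-dir ./cache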
| 213 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=100, encoder_layers=6,
        encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048,
        decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True,
        activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine",
        backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=1, bbox_cost=5, giou_cost=2,
        mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2,
        eos_coefficient=0.1, **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
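# The OnnxConfig below declares DETR's inputs for ONNX export: images are dynamic along
# batch/channels/height/width, while the pixel mask only varies along the batch axis.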
class DetrOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 213 | 1 |
from __future__ import annotations
from math import pi
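# An inductor's reactance follows X_L = 2 * pi * f * L, so given any two of
# (inductance L, frequency f, reactance X_L) the third is recoverable, e.g.
# ind_reactance(35e-3, 1e3, 0) returns {'reactance': 219.9114857512855}.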
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 71 |
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    """simple docstring"""

    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        """simple docstring"""

        config_class = NewModelConfig
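# NewModelConfig/TFNewModel form a minimal config/model pair used purely to exercise
# the auto-class registration machinery in the tests below.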
@require_tf
class TFAutoModelTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        """simple docstring"""
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)

        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        """simple docstring"""
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        """simple docstring"""
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        """simple docstring"""
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)

            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        """simple docstring"""
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_pretrained_with_tuple_values(self):
        """simple docstring"""
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)

        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        """simple docstring"""
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            model = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            model = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        """simple docstring"""
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            model = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        """simple docstring"""
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
    def test_cached_model_has_minimum_calls_to_head(self):
        """simple docstring"""
        # Make sure we have cached the model.
        model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

        # With a sharded checkpoint
        model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            model = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)
| 71 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
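# MAPPING translates fairseq parameter-name fragments to their HF counterparts; the
# "*" entries are expanded to concrete encoder-layer indices in recursively_load_weights.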
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
        config.activation_dropout = fs_config.activation_dropout
        config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        config.attention_dropout = fs_config.attention_dropout
        config.feat_proj_dropout = fs_config.dropout_input
        config.hidden_dropout = fs_config.dropout
        config.mask_feature_length = fs_config.mask_channel_length
        config.mask_feature_prob = fs_config.mask_channel_prob
        config.mask_time_length = fs_config.mask_length
        config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
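# Conversion entry point: weights are only copied, never trained, hence torch.no_grad.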
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
 | 292 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`

                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")

        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )
    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
 | 292 | 1 |
"""simple docstring"""
INSTALL_CONTENT = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
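# These constants are consumed by the documentation notebook builder: INSTALL_CONTENT
# seeds the first notebook cell, and the placeholder map keeps templated class names
# from being reformatted (naming of the consumer is inferred, not part of this file).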
| 332 |
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding character slot
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
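# Example: check_anagrams("Silent", "Listen") is True, check_anagrams("There", "Their") is False.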
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 332 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, relative_attention=False,
        position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return DebertaVaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
    def check_loss_output(self, result):
        '''simple docstring'''
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def _lowercase ( self: int ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = DebertaVaForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Any = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,start_positions=__lowerCAmelCase ,end_positions=__lowerCAmelCase ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _lowercase ( self: Tuple ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Any ,__lowerCAmelCase: str ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Dict ):
'''simple docstring'''
_lowerCamelCase : List[str] = DebertaVaForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_lowerCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
_lowerCamelCase : int = model(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
(
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
), (
_lowerCamelCase
),
) : List[Any] = config_and_inputs
_lowerCamelCase : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A_ ( _a , _a , unittest.TestCase ):
lowerCAmelCase__ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[str] = DebertaVaModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 )
def _lowercase ( self: List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__lowerCAmelCase )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__lowerCAmelCase )
@slow
def _lowercase ( self: Tuple ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : str = DebertaVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@unittest.skip(reason="Model not available yet" )
def _lowercase ( self: int ):
'''simple docstring'''
pass
@slow
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Tuple = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
_lowerCamelCase : Optional[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_lowerCamelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCamelCase : Tuple = model(__lowerCAmelCase ,attention_mask=__lowerCAmelCase )[0]
# compare the actual values for a slice.
_lowerCamelCase : Any = torch.tensor(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,__lowerCAmelCase ,atol=1e-4 ) ,F"""{output[:, 1:4, 1:4]}""" ) | 340 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase_( _lowerCamelCase ) -> str:
'''simple docstring'''
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise TypeError("Undefined for non-integers" )
elif precision < 1:
raise ValueError("Undefined for non-natural numbers" )
_lowerCamelCase : int = precision
_lowerCamelCase : Dict = ceil(precision / 14 )
_lowerCamelCase : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
_lowerCamelCase : int = 1
_lowerCamelCase : Optional[int] = 13591409
_lowerCamelCase : int = Decimal(_lowerCamelCase )
for k in range(1 , _lowerCamelCase ):
_lowerCamelCase : Union[str, Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(_lowerCamelCase ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_lowerCAmelCase : Union[str, Any] = 50
print(f'''The first {n} digits of pi is: {pi(n)}''') | 340 | 1 |
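# A quick sanity check for the Chudnovsky routine above: a minimal sketch,
# assuming `pi` is in scope; the leading digits must agree with math.pi.
import math

approx = pi(15)
assert str(math.pi).startswith(approx)
print(approx)  # 3.1415926535897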
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 176 |
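# Instantiating the shim above emits the deprecation warning; a small sketch,
# assuming PoolFormerFeatureExtractor is importable from this module.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = PoolFormerFeatureExtractor()  # otherwise behaves like PoolFormerImageProcessor
print(caught[0].category.__name__)  # FutureWarning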
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    """Convert a truthy/falsy command-line string into a real boolean."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map the string representation of a choice back to the choice object itself (e.g. an Enum member)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """Shorthand for `dataclasses.field` that stores aliases and help text in the field metadata."""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """A thin `argparse.ArgumentParser` wrapper that builds CLI arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
| 176 | 1 |
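# A minimal usage sketch for HfArgumentParser as reconstructed above; the
# dataclass and flag values here are illustrative, not part of the original module.
from dataclasses import dataclass, field


@dataclass
class TrainingConfig:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    epochs: int = 3
    use_fp16: bool = False


parser = HfArgumentParser(TrainingConfig)
(config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--use_fp16", "true"])
print(config.learning_rate, config.epochs, config.use_fp16)  # 0.0001 3 True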
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 358 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Return a new string with the characters of the two inputs interleaved."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 198 | 0 |
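# The interleaving is easiest to see on inputs of unequal length; a short
# sketch using alternative_string_arrange from above.
print(alternative_string_arrange("AB", "XYZ"))   # AXBYZ
print(alternative_string_arrange("ABCD", "XY"))  # AXBYCD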
"""simple docstring"""
def __A ( a_ :str , a_ :str) -> float:
def get_matched_characters(a_ :str , a_ :str) -> str:
__a : Optional[Any] = []
__a : Optional[int] = min(len(_stra) , len(_stra)) // 2
for i, l in enumerate(_stra):
__a : Optional[Any] = int(max(0 , i - limit))
__a : int = int(min(i + limit + 1 , len(_stra)))
if l in _stra[left:right]:
matched.append(a_)
__a : Dict = F"""{_stra[0:_stra.index(a_)]} {_stra[_stra.index(a_) + 1:]}"""
return "".join(a_)
# matching characters
__a : List[str] = get_matched_characters(a_ , a_)
__a : Optional[int] = get_matched_characters(a_ , a_)
__a : List[str] = len(a_)
# transposition
__a : Any = (
len([(ca, ca) for ca, ca in zip(a_ , a_) if ca != ca]) // 2
)
if not match_count:
__a : Optional[int] = 0.0
else:
__a : Any = (
1
/ 3
* (
match_count / len(a_)
+ match_count / len(a_)
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a : str = 0
for ca, ca in zip(stra[:4] , stra[:4]):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world''')) | 160 |
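# The Winkler prefix bonus is what separates this metric from plain Jaro;
# a short sketch using jaro_winkler from above.
print(jaro_winkler("martha", "martha"))  # identical strings score 1.0
print(jaro_winkler("martha", "marhta") > jaro_winkler("hello", "world"))  # True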
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 160 | 1 |
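# A simplified stand-in for the lazy-module pattern used above: attributes are
# resolved on first access instead of at package import time. The class name
# below is illustrative, not the transformers implementation.
import importlib
import types


class LazyAttrModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that actually defines it.
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        submodule = importlib.import_module(f".{self._symbol_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)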
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    def parse_bool(string):
        """Parse an explicit "True"/"False" CLI string into a boolean."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 198 |
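# The script above is CLI-driven, but the underlying helper can also be called
# directly; a hedged sketch mirroring the script's own keyword arguments
# (the checkpoint and config paths below are placeholders).
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path="control_sd15_canny.pth",
    original_config_file="cldm_v15.yaml",
    image_size=512,
    extract_ema=False,
    num_in_channels=None,
    upcast_attention=False,
    from_safetensors=False,
    device="cpu",
    use_linear_projection=None,
    cross_attention_dim=None,
)
controlnet.save_pretrained("./controlnet-converted")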
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list) -> Node:
    """Creates a Linked List from the elements of the given sequence
    (e.g. list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception("The Elements List is empty")

    # Set the first element as head
    head = current = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    # Recurse to the end of the list first, then print on the way back.
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()

    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)


if __name__ == "__main__":
    main()
| 198 | 1 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
lowercase__: Union[str, Any] = iter(UpperCAmelCase_ )
while True:
lowercase__: Dict = tuple(itertools.islice(UpperCAmelCase_ , UpperCAmelCase_ ) )
if not chunk:
return
yield chunk
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Optional[Any]:
lowercase__: List[str] = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] )
lowercase__: str = ''''''
if len(UpperCAmelCase_ ) < 2:
return dirty
for i in range(len(UpperCAmelCase_ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(UpperCAmelCase_ ) & 1:
clean += "X"
return clean
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
lowercase__: Any = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowercase__: List[Any] = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(UpperCAmelCase_ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(UpperCAmelCase_ )
return table
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: Tuple = generate_table(UpperCAmelCase_ )
lowercase__: Any = prepare_input(UpperCAmelCase_ )
lowercase__: int = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
lowercase__, lowercase__: Any = divmod(table.index(UpperCAmelCase_ ) , 5 )
lowercase__, lowercase__: Dict = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
lowercase__: Dict = generate_table(UpperCAmelCase_ )
lowercase__: Optional[Any] = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(UpperCAmelCase_ , 2 ):
lowercase__, lowercase__: List[Any] = divmod(table.index(UpperCAmelCase_ ) , 5 )
lowercase__, lowercase__: int = divmod(table.index(UpperCAmelCase_ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 177 |
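# A quick round trip with the Playfair functions above; decoding recovers the
# prepared (upper-cased, X-padded) plaintext.
key = "secret"
ciphertext = encode("hide the gold", key)
print(ciphertext)
assert decode(ciphertext, key) == "HIDETHEGOLDX"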
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]

        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]

        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 55 | 0 |
"""simple docstring"""
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float ):
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ):
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : float , snake_case_ : float , ):
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return compound_interest(
snake_case_ , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 370 |
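# A couple of worked values for the interest helpers above.
print(simple_interest(500.0, 0.10, 2))       # 500 * 0.10 * 2 = 100.0
print(compound_interest(10_000.0, 0.05, 1))  # 10000 * ((1.05 ** 1) - 1), approx. 500.0 up to float rounding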
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """
    Ensure `FeaturesManager.determine_framework` picks the right framework.
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 286 | 0 |
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : int ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Union[str, Any] ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : Dict ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : List[str] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Tuple ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Any = ["sentencepiece"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Union[str, Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : str ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : str ,*lowerCamelCase__ : Tuple ,**lowerCamelCase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Tuple = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : str ,**lowerCamelCase__ : Optional[int] ) -> Optional[int]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[str] ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : Any ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Tuple ,*lowerCamelCase__ : Optional[Any] ,**lowerCamelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : int ,**lowerCamelCase__ : List[str] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Optional[int] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Dict ,*lowerCamelCase__ : List[str] ,**lowerCamelCase__ : Union[str, Any] ) -> int:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : int = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Union[str, Any] ) -> Dict:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Optional[int] ,**lowerCamelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[str] = ["sentencepiece"]
def __init__( self : int ,*lowerCamelCase__ : Dict ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : int ) -> str:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Any ,**lowerCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Dict = ["sentencepiece"]
def __init__( self : Any ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : int ) -> Any:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : Optional[int] = ["sentencepiece"]
def __init__( self : List[Any] ,*lowerCamelCase__ : Union[str, Any] ,**lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : List[Any] = ["sentencepiece"]
    def __init__( self : Optional[int] ,*args : Optional[int] ,**kwargs : Optional[int] ) -> List[str]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
class UpperCamelCase__ ( metaclass=lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = ["sentencepiece"]
    def __init__( self : str ,*args : Union[str, Any] ,**kwargs : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self ,["""sentencepiece"""] )
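# Illustrative sketch (not part of the original file): every dummy class above is a
# placeholder that raises an ImportError-style message from `requires_backends` when the
# `sentencepiece` backend is missing. Hypothetical check:
#
#   try:
#       UpperCamelCase__()
#   except ImportError:
#       print("sentencepiece is required for this tokenizer")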
| 296 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "Whether to use adafactor."} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 296 | 1 |
def match_pattern ( input_string :str , pattern :str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = 'aab'
    pattern = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
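# Additional sanity checks (sketch):
#   match_pattern("aab", "c*a*b")  # True:  "c*" matches zero chars, "a*" matches "aa"
#   match_pattern("abc", "a.c")    # True:  "." matches any single character
#   match_pattern("abc", "ab")     # False: the pattern must cover the whole string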
| 368 |
from datetime import datetime
import requests
def download_video ( url :str ) -> bytes:
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_src = requests.get(base_url + url ).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_src ).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
    print(f'''Done. Video saved to disk as {file_name}.''')
| 232 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[int] = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class A__ ( PretrainedConfig ):
    """simple docstring"""

    model_type = 'beit'

    def __init__( self , vocab_size=8_1_9_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        super().__init__(**kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A__Onnx ( OnnxConfig ):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ):
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ] )

    @property
    def atol_for_validation( self ):
        return 1E-4
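# Usage sketch (illustrative values, not from the original file):
#
#   config = A__(image_size=384, use_auxiliary_head=False)
#   assert config.model_type == 'beit' and config.hidden_size == 768
#   onnx_config = A__Onnx(config)  # OnnxConfig subclasses wrap a model config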
| 127 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 207 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
A_ = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''tokenization_speech_to_text'''] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''feature_extraction_speech_to_text'''] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''modeling_tf_speech_to_text'''] = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    A_['''modeling_speech_to_text'''] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], A_, module_spec=__spec__)
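# Behavior sketch (illustrative): with the lazy structure above, importing the package is
# cheap, and a submodule is only materialized on first attribute access, assuming the
# matching optional backend (sentencepiece, speech, tf, or torch) is installed:
#
#   from transformers.models.speech_to_text import Speech2TextConfig  # resolved lazily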
| 296 |
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = '''src/diffusers'''
REPO_PATH = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue ( line : str, indent : str ) ->Any:
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""", line ) is not None
def find_code_in_diffusers ( object_name : str ) ->Union[str, Any]:
    parts = object_name.split(""".""" )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f'{module}.py' ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module, parts[i] )
    if i >= len(parts ):
        raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(DIFFUSERS_PATH, f'{module}.py' ), """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)', lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index], indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
_re_replace_pattern = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
_re_fill_pattern = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent ( code : str ) ->Optional[Any]:
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""", lines[idx] ).groups()[0]
    return ""
def blackify ( code : str ) ->int:
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f'class Bla:\n{code}'
    mode = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=1_1_9, preview=True )
    result = black.format_str(code, mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent ( filename : Any, overwrite : Dict=False ) ->List[Any]:
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent ) and re.search(f'^{indent}# End copy', line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code_lines )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""", """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obja , objb , option = pattern.groups()
                theoretical_code = re.sub(obja, objb, theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obja.lower(), objb.lower(), theoretical_code )
                    theoretical_code = re.sub(obja.upper(), objb.upper(), theoretical_code )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f'Detected changes, rewriting {filename}.' )
        with open(filename, """w""", encoding="""utf-8""", newline="""\n""" ) as f:
            f.writelines(lines )
    return diffs
def check_copies ( overwrite : bool = False ) ->Any:
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, """**/*.py""" ), recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite )
        diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
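# Usage sketch: run from the repository root.
#
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite out-of-sync copies in place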
| 296 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_a = logging.get_logger(__name__)
_a = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class _UpperCAmelCase( BackboneConfigMixin , PretrainedConfig ):
    model_type = 'focalnet'

    def __init__( self , image_size=2_24 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[1_92, 3_84, 7_68, 7_68] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
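# Usage sketch (illustrative): four stages follow the stem for a four-entry `depths`.
#
#   config = _UpperCAmelCase(embed_dim=128, depths=[2, 2, 18, 2])
#   assert config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']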
| 194 |
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
_a = """http://www.mocksite.com/file1.txt"""
_a = """\"text\": [\"foo\", \"foo\"]"""
_a = """6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class MockResponse:
    status_code = 2_00
    headers = {'Content-Length': '100'}
    cookies = {}

    def iter_content( self , **kwargs) -> Optional[int]:
        '''simple docstring'''
        return [bytes(CONTENT , '''utf-8''')]
def mock_request( *args, **kwargs ) -> int:
    """simple docstring"""
    return MockResponse()
@pytest.mark.parametrize('''urls_type''', [str, list, dict] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Dict:
"""simple docstring"""
import requests
    monkeypatch.setattr(requests, '''request''', mock_request )
_UpperCamelCase = URL
if issubclass(__snake_case, __snake_case ):
_UpperCamelCase = url
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = [url]
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = {'''train''': url}
_UpperCamelCase = '''dummy'''
_UpperCamelCase = '''downloads'''
_UpperCamelCase = tmp_path
_UpperCamelCase = DownloadConfig(
cache_dir=os.path.join(__snake_case, __snake_case ), use_etag=__snake_case, )
_UpperCamelCase = DownloadManager(dataset_name=__snake_case, download_config=__snake_case )
_UpperCamelCase = dl_manager.download(__snake_case )
_UpperCamelCase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = [downloaded_paths]
_UpperCamelCase = [urls]
elif isinstance(__snake_case, __snake_case ):
assert "train" in downloaded_paths.keys()
_UpperCamelCase = downloaded_paths.values()
_UpperCamelCase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__snake_case, __snake_case ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_UpperCamelCase = Path(__snake_case )
_UpperCamelCase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_UpperCamelCase = downloaded_path.read_text()
assert content == CONTENT
_UpperCamelCase = downloaded_path.with_suffix('''.json''' )
assert metadata_downloaded_path.exists()
_UpperCamelCase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('''paths_type''', [str, list, dict] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = str(__snake_case )
if issubclass(__snake_case, __snake_case ):
_UpperCamelCase = filename
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = [filename]
elif issubclass(__snake_case, __snake_case ):
_UpperCamelCase = {'''train''': filename}
_UpperCamelCase = '''dummy'''
_UpperCamelCase = xz_file.parent
_UpperCamelCase = '''extracted'''
_UpperCamelCase = DownloadConfig(
cache_dir=__snake_case, use_etag=__snake_case, )
_UpperCamelCase = DownloadManager(dataset_name=__snake_case, download_config=__snake_case )
_UpperCamelCase = dl_manager.extract(__snake_case )
_UpperCamelCase = paths
for extracted_paths in [extracted_paths]:
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = [extracted_paths]
_UpperCamelCase = [paths]
elif isinstance(__snake_case, __snake_case ):
assert "train" in extracted_paths.keys()
_UpperCamelCase = extracted_paths.values()
_UpperCamelCase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__snake_case, __snake_case ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_UpperCamelCase = Path(__snake_case )
_UpperCamelCase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__snake_case, etag=__snake_case )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_UpperCamelCase = extracted_path.read_text()
_UpperCamelCase = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl ( path, file ) -> Optional[Any]:
    """simple docstring"""
    assert path.endswith('''.jsonl''' )
    for num_items, line in enumerate(file, start=1 ):
        item = json.loads(line.decode('''utf-8''' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('''archive_jsonl''', ['''tar_jsonl_path''', '''zip_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_jsonl == 2
@pytest.mark.parametrize('''archive_nested_jsonl''', ['''tar_nested_jsonl_path''', '''zip_nested_jsonl_path'''] )
def lowerCamelCase__ ( __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = request.getfixturevalue(__snake_case )
_UpperCamelCase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__snake_case ), start=1 ):
_test_jsonl(__snake_case, __snake_case )
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase__ ( __snake_case ) -> Dict:
"""simple docstring"""
_UpperCamelCase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(__snake_case ), start=1 ):
assert os.path.basename(__snake_case ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 194 | 1 |
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( _lowerCamelCase : str = "https://www.worldometers.info/coronavirus" ):
A__ = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
A__ = soup.findAll("h1" )
A__ = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 358 |
'''simple docstring'''
def UpperCamelCase ( number : int ):
    if not isinstance(number , int ):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg )
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg )
    current_number = 1
    for i in range(1 , number ):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
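# Sanity check (sketch): for n >= 1 the function returns the n-th Catalan number
# (1, 1, 2, 5, 14, ...), e.g. UpperCamelCase(5) == 14.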
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 | 0 |
def topological_sort ( graph : List[Any] ):
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
a_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
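# Expected output for the adjacency list above (sketch): [0, 1, 2, 3, 4, 5]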
| 175 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 38 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 160 |
'''simple docstring'''
import os
import sys
import unittest
lowercase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
lowercase : List[Any] = os.path.join(git_repo_path, "src", "diffusers")
class __UpperCAmelCase ( unittest.TestCase ):
    def test_find_backend( self ):
        """simple docstring"""
        simple_backend = find_backend('    if not is_torch_available():' )
        self.assertEqual(simple_backend , 'torch' )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend('    if not (is_torch_available() and is_transformers_available()):' )
        self.assertEqual(double_backend , 'torch_and_transformers' )
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            '    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' )
        self.assertEqual(triple_backend , 'torch_and_transformers_and_onnx' )

    def test_read_init( self ):
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch' , objects )
        self.assertIn('torch_and_transformers' , objects )
        self.assertIn('flax_and_transformers' , objects )
        self.assertIn('torch_and_transformers_and_onnx' , objects )

        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel' , objects['torch'] )
        self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] )
        self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] )
        self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] )
        self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] )
        self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] )

    def test_create_dummy_object( self ):
        """simple docstring"""
        dummy_constant = create_dummy_object('CONSTANT' , '\'torch\'' )
        self.assertEqual(dummy_constant , '\nCONSTANT = None\n' )

        dummy_function = create_dummy_object('function' , '\'torch\'' )
        self.assertEqual(
            dummy_function , '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n' )

        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass' , '\'torch\'' )
        self.assertEqual(dummy_class , expected_dummy_class )

    def test_create_dummy_files( self ):
        """simple docstring"""
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
        self.assertEqual(dummy_files['torch'] , expected_dummy_pytorch_file )
| 160 | 1 |
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Optional[Any]=sys.maxsize ):
snake_case__ : List[str] = """bilinear"""
snake_case__ : Tuple = max_size
snake_case__ : Union[str, Any] = short_edge_length
def __call__( self : str , snake_case_ : List[Any] ):
snake_case__ : Union[str, Any] = []
for img in imgs:
snake_case__ , snake_case__ : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
snake_case__ : List[str] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
snake_case__ : Any = size * 1.0 / min(snake_case_ , snake_case_ )
if h < w:
snake_case__ , snake_case__ : Optional[int] = size, scale * w
else:
snake_case__ , snake_case__ : List[str] = scale * h, size
if max(snake_case_ , snake_case_ ) > self.max_size:
snake_case__ : int = self.max_size * 1.0 / max(snake_case_ , snake_case_ )
snake_case__ : List[str] = newh * scale
snake_case__ : str = neww * scale
snake_case__ : List[Any] = int(neww + 0.5 )
snake_case__ : Optional[int] = int(newh + 0.5 )
if img.dtype == np.uinta:
snake_case__ : Dict = Image.fromarray(snake_case_ )
snake_case__ : str = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
snake_case__ : str = np.asarray(snake_case_ )
else:
snake_case__ : List[str] = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
snake_case__ : Any = nn.functional.interpolate(
snake_case_ , (newh, neww) , mode=self.interp_method , align_corners=snake_case_ ).squeeze(0 )
img_augs.append(snake_case_ )
return img_augs
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[int] , snake_case_ : Any ):
snake_case__ : Optional[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
snake_case__ : List[str] = cfg.INPUT.FORMAT
snake_case__ : Any = cfg.SIZE_DIVISIBILITY
snake_case__ : Tuple = cfg.PAD_VALUE
snake_case__ : Any = cfg.INPUT.MAX_SIZE_TEST
snake_case__ : Optional[Any] = cfg.MODEL.DEVICE
snake_case__ : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
snake_case__ : Any = lambda snake_case_ : (x - self.pixel_mean) / self.pixel_std
def lowerCamelCase ( self : str , snake_case_ : int ):
snake_case__ : Optional[int] = tuple(max(snake_case_ ) for s in zip(*[img.shape for img in images] ) )
snake_case__ : Dict = [im.shape[-2:] for im in images]
snake_case__ : str = [
nn.functional.pad(
snake_case_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(snake_case_ , snake_case_ )
]
return torch.stack(snake_case_ ), torch.tensor(snake_case_ )
def __call__( self : Union[str, Any] , snake_case_ : List[str] , snake_case_ : List[Any]=False ):
with torch.no_grad():
if not isinstance(snake_case_ , snake_case_ ):
snake_case__ : List[Any] = [images]
if single_image:
assert len(snake_case_ ) == 1
for i in range(len(snake_case_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(snake_case_ , images.pop(snake_case_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
snake_case_ , torch.as_tensor(img_tensorize(images.pop(snake_case_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
snake_case__ : Any = torch.tensor([im.shape[:2] for im in images] )
snake_case__ : Union[str, Any] = self.aug(snake_case_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
snake_case__ : Optional[Any] = [self.normalizer(snake_case_ ) for x in images]
# now pad them to do the following operations
snake_case__ , snake_case__ : Optional[int] = self.pad(snake_case_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
snake_case__ : Any = torch.true_divide(snake_case_ , snake_case_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> str:
assert torch.isfinite(_lowerCAmelCase ).all(), "Box tensor contains infinite or NaN!"
snake_case__ , snake_case__ : Any = box_size
tensor[:, 0].clamp_(min=0 , max=_lowerCAmelCase )
tensor[:, 1].clamp_(min=0 , max=_lowerCAmelCase )
tensor[:, 2].clamp_(min=0 , max=_lowerCAmelCase )
tensor[:, 3].clamp_(min=0 , max=_lowerCAmelCase )
| 35 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _UpperCamelCase['''image_processing_bridgetower'''] = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _UpperCamelCase['''modeling_bridgetower'''] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _UpperCamelCase)
| 254 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCAmelCase ( ChunkPipeline ):
    '''simple docstring'''

    def __init__( self : str ,**kwargs : Optional[Any] ):
        super().__init__(**kwargs )

        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )

        requires_backends(self ,"vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )

    def __call__( self : int ,image : Union[str, "Image.Image", List[Dict[str, Any]]] ,candidate_labels : Union[str, List[str]] = None ,**kwargs : Dict ,):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )

        if isinstance(image ,(str, Image.Image) ):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs ,**kwargs )
        return results

    def _sanitize_parameters( self : Tuple ,**kwargs : Union[str, Any] ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess( self : str ,inputs : int ):
        image = load_image(inputs["image"] )
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels ,str ):
            candidate_labels = candidate_labels.split("," )

        target_size = torch.tensor([[image.height, image.width]] ,dtype=torch.intaa )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label ,return_tensors=self.framework )
            image_features = self.image_processor(image ,return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self : Any ,model_inputs : Union[str, Any] ):
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )

        outputs = self.model(**model_inputs )

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess( self : Union[str, Any] ,model_outputs : List[str] ,threshold : List[str]=0.1 ,top_k : Dict=None ):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output ,threshold=threshold ,target_sizes=model_output["target_size"] )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                results.append({"score": score, "label": label, "box": box} )

        results = sorted(results ,key=lambda x : x["score"] ,reverse=True )
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box( self : List[str] ,box : "torch.Tensor" ):
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
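# Usage sketch (assumes an OWL-ViT checkpoint; file name below is illustrative):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   detector("cats.png", candidate_labels=["cat", "remote control"])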
| 358 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class UpperCAmelCase ( TaskTemplate ):
    '''simple docstring'''

    task : str = field(default="automatic-speech-recognition" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema : ClassVar[Features] = Features({"audio": Audio()} )
    label_schema : ClassVar[Features] = Features({"transcription": Value("string" )} )
    audio_column : str = "audio"
    transcription_column : str = "transcription"

    def align_with_features( self : Optional[int] ,features : Union[str, Any] ):
        if self.audio_column not in features:
            raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
        if not isinstance(features[self.audio_column] ,Audio ):
            raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping( self : int ):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
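# Usage sketch (hypothetical feature set):
#
#   features = Features({"audio": Audio(), "transcription": Value("string")})
#   task = UpperCAmelCase()
#   task = task.align_with_features(features)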
| 124 | 0 |
'''simple docstring'''
from collections.abc import Generator
def A__ ( ):
_UpperCamelCase , _UpperCamelCase : Tuple = 0, 1
while True:
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = b, a + b
yield b
def A__ ( UpperCAmelCase_ = 1_0_0_0 ):
_UpperCamelCase : List[Any] = 1
_UpperCamelCase : str = fibonacci_generator()
while len(str(next(UpperCAmelCase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
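# Sanity check (sketch): solution(3) == 12, since F(12) = 144 is the first
# Fibonacci number with three digits.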
| 83 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__)
@dataclass(frozen=True )
class __A :
a__ : str
a__ : str
a__ : Optional[str] = None
a__ : Optional[str] = None
a__ : Optional[str] = None
@dataclass(frozen=True )
class __A :
a__ : List[int]
a__ : Optional[List[int]] = None
a__ : Optional[List[int]] = None
a__ : Optional[Union[int, float]] = None
a__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class __A ( Dataset ):
a__ : List[InputFeatures]
def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
UpperCAmelCase_ = torch.load(__a )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
UpperCAmelCase_ = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__(self : List[Any] ):
return len(self.features )
def __getitem__(self : Any , __a : Optional[Any] ):
return self.features[i]
def _lowercase (self : Union[str, Any] ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class __A :
a__ : List[InputFeatures]
def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowercase (self : int ):
return self.dataset
def __len__(self : Any ):
return len(self.features )
def __getitem__(self : int , __a : Union[str, Any] ):
return self.features[i]
def _lowercase (self : int ):
return self.label_list
class HansProcessor ( DataProcessor ):
def _lowercase (self : List[Any] , __a : Dict ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" )
def _lowercase (self : Any , __a : List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" )
def _lowercase (self : Any ):
return ["contradiction", "entailment", "neutral"]
    def _lowercase (self : Union[str, Any] , lines : Optional[int] , set_type : Union[str, Any] ):
        examples = []
        for i, line in enumerate(lines ):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            label = line[7][2:] if line[7].startswith("ex" ) else line[7]
            pairID = line[0]
            examples.append(InputExample(guid=guid , text_a=text_a , text_b=text_b , label=label , pairID=pairID ) )
return examples
def hans_convert_examples_to_features ( examples : List[InputExample] , label_list : List[str] , max_length : int , tokenizer : PreTrainedTokenizer , ) -> Optional[Any]:
    '''simple docstring'''
    label_map = {label: i for i, label in enumerate(label_list )}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples ) , desc="convert examples to features" ):
        if ex_index % 1_00_00 == 0:
            logger.info("Writing example %d" % (ex_index) )
        inputs = tokenizer(
            example.text_a , example.text_b , add_special_tokens=True , max_length=max_length , padding="max_length" , truncation=True , return_overflowing_tokens=True , )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID )
        features.append(InputFeatures(**inputs , label=label , pairID=pairID ) )
    for i, example in enumerate(examples[:5] ):
        logger.info("*** Example ***" )
        logger.info(f"""guid: {example}""" )
        logger.info(f"""features: {features[i]}""" )
    return features
hans_tasks_num_labels = {
    'hans': 3,
}
hans_processors = {
    'hans': HansProcessor,
}
| 1 | 0 |
from ....utils import logging
__lowerCamelCase : Any = logging.get_logger(__name__)
class A__ ( object ):
    def __init__( self , config , num_labels=None , modal_hidden_size=2048 ):
        '''simple docstring'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 140 |
from scipy.stats import spearmanr
import datasets
__lowerCamelCase : List[str] = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
__lowerCamelCase : Optional[int] = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
def __UpperCamelCase( self , A_ , A_ , A_=False ):
'''simple docstring'''
UpperCamelCase : Tuple = spearmanr(A_ , A_ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 140 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
lowerCamelCase = logging.getLogger(__name__)
def git_log ( folder_path ):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params ( params ):
"""simple docstring"""
if params.n_gpu <= 0:
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : Union[str, Any] = -1
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Union[str, Any] = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
lowerCAmelCase__ : int = int(os.environ['''WORLD_SIZE'''] )
lowerCAmelCase__ : List[Any] = int(os.environ['''N_GPU_NODE'''] )
lowerCAmelCase__ : List[Any] = int(os.environ['''RANK'''] )
# number of nodes / node ID
lowerCAmelCase__ : List[Any] = params.world_size // params.n_gpu_per_node
lowerCAmelCase__ : Union[str, Any] = params.global_rank // params.n_gpu_per_node
lowerCAmelCase__ : Optional[int] = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
lowerCAmelCase__ : Tuple = 1
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : Union[str, Any] = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
lowerCAmelCase__ : str = params.node_id == 0 and params.local_rank == 0
lowerCAmelCase__ : Tuple = params.n_nodes > 1
# summary
lowerCAmelCase__ : Optional[Any] = f'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed(args ):
    """Seed numpy, torch (and CUDA, if in use) for reproducibility."""
    np.random.seed(args.seed )
    torch.manual_seed(args.seed )
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed )
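# Illustrative usage sketch (not part of the original file): the helpers above
# expect an argparse-style namespace carrying the attributes they read/set.
#
#     from types import SimpleNamespace
#     params = SimpleNamespace(n_gpu=0, local_rank=-1, seed=42, dump_path="./outputs")
#     init_gpu_params(params)    # single-process setup; no CUDA needed when n_gpu=0
#     set_seed(params)
#     git_log(params.dump_path)  # assumes the script runs inside a git checkout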
| 131 |
import unittest
from transformers import DonutProcessor
MODEL_NAME = '''naver-clova-ix/donut-base'''


class DonutProcessorTest(unittest.TestCase):
    def setUp( self )-> None:
        self.processor = DonutProcessor.from_pretrained(MODEL_NAME )
    def test_token2json( self )-> None:
        expected_json = {
'''name''': '''John Doe''',
'''age''': '''99''',
'''city''': '''Atlanta''',
'''state''': '''GA''',
'''zip''': '''30301''',
'''phone''': '''123-4567''',
'''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
}
        sequence = (
'''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
'''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
'''<s_nicknames><s_nickname>Johnny</s_nickname>'''
'''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
)
        actual_json = self.processor.token2json(sequence )
        self.assertDictEqual(actual_json , expected_json )
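# Illustrative sketch (not part of the original file): `token2json` parses
# Donut's XML-like tag sequence into nested Python structures, e.g.
#
#     processor = DonutProcessor.from_pretrained(MODEL_NAME)
#     processor.token2json("<s_name>John Doe</s_name>")
#     # -> {"name": "John Doe"}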
| 131 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ) -> None:
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict( self ) -> dict:
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp( self ) -> None:
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> None:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
        self.assertTrue(hasattr(image_processor , "apply_ocr" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> None:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def test_batch_feature( self ) -> None:
        pass
    def test_call_pil( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch( self ) -> None:
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_layoutlmv3_integration_test( self ) -> None:
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )

        image = Image.open(ds[0]["file"] ).convert("RGB" )

        encoding = image_processing(image , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
UpperCAmelCase_ : Union[str, Any] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCAmelCase_ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        expected_boxes = UpperCAmelCase_  # the box coordinates assigned above

        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )

        encoding = image_processing(image , return_tensors="pt" )

        self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
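# Illustrative usage sketch (not part of the original file): by default the
# processor runs Tesseract OCR and returns words/boxes alongside pixel values.
#
#     processor = LayoutLMvaImageProcessor()            # apply_ocr=True by default
#     encoding = processor(image, return_tensors="pt")
#     encoding.pixel_values.shape                       # (1, 3, 224, 224)
#     encoding.words, encoding.boxes                    # OCR words + normalized boxes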
| 366 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__( self , degree : int , coefficients : MutableSequence[float] ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )

        self.coefficients : list[float] = list(coefficients )
        self.degree = degree

    def __add__( self , polynomial_a : Polynomial ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )

    def __sub__( self , polynomial_a : Polynomial ) -> Polynomial:
        return self + polynomial_a * Polynomial(0 , [-1] )

    def __neg__( self ) -> Polynomial:
        return Polynomial(self.degree , [-c for c in self.coefficients] )

    def __mul__( self , polynomial_a : Polynomial ) -> Polynomial:
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )

    def evaluate( self , substitution : int | float ) -> int | float:
        result : int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__( self ) -> str:
        polynomial = ""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial

    def __repr__( self ) -> str:
        return self.__str__()

    def derivative( self ) -> Polynomial:
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )

    def integral( self , constant : int | float = 0 ) -> Polynomial:
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )

    def __eq__( self , polynomial_a : object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__( self , polynomial_a : object ) -> bool:
        return not self.__eq__(polynomial_a )
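# Example usage (illustrative, not part of the original file); coefficients are
# given in ascending order of degree:
#
#     p = Polynomial(2, [1, 2, 3])   # 3x^2 + 2x + 1
#     q = Polynomial(1, [0, 1])      # x
#     print(p + q)                   # 3x^2 + 3x + 1
#     print(p.evaluate(2))           # 17
#     print(p.derivative())          # 6x + 2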
| 253 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_luke'''] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 189 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']

    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def flip_channel_order( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )

        images = make_list_of_images(images )

        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]

        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]

        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]

        images = [to_channel_dimension_format(image , data_format ) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
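    # Illustrative usage sketch (comment only, not part of the original file):
    #
    #     processor = MobileViTImageProcessor()
    #     batch = processor(pil_image, return_tensors="pt")
    #     batch["pixel_values"].shape   # (1, 3, 256, 256) with the defaults above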
    def post_process_semantic_segmentation( self , outputs , target_sizes : List[Tuple] = None ):
        '''simple docstring'''
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )

            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]

        return semantic_segmentation
| 189 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2
    @property
    def dummy_input(self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )

        return {"sample": image}

    @property
    def input_shape(self ):
        """simple docstring"""
        return (3, 32, 32)

    @property
    def output_shape(self ):
        """simple docstring"""
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self ):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self ):
        """simple docstring"""
        pass

    def test_training(self ):
        """simple docstring"""
        pass
    @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
    def test_gradient_checkpointing(self ):
        """simple docstring"""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()

        assert model_a.is_gradient_checkpointing and model_a.training

        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )

        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub(self ):
        """simple docstring"""
        model, loading_info = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(torch_device )
        image = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self ):
        """simple docstring"""
        model = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
        model = model.to(torch_device )
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )

        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078E-01,
                    -3.8323E-04,
                    -1.2681E-01,
                    -1.1462E-01,
                    2.0095E-01,
                    1.0893E-01,
                    -8.8247E-02,
                    -3.0361E-01,
                    -9.8644E-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1E-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase ):
    def get_file_format(self , seed , shape ):
        """simple docstring"""
        return f"""gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"""

    def tearDown(self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self , seed=0 , shape=(4, 3, 512, 512) , fp16=False ):
        """simple docstring"""
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image

    def get_sd_vae_model(self , model_id="CompVis/stable-diffusion-v1-4" , fp16=False ):
        """simple docstring"""
        revision = '''fp16''' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='''vae''' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()

        return model

    def get_generator(self , seed=0 ):
        """simple docstring"""
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion(self , seed , expected_slice , expected_slice_mps ):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )

        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self , seed , expected_slice ):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True )
        image = self.get_sd_image(seed , fp16=True )
        generator = self.get_generator(seed )

        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )

        assert torch_all_close(output_slice , expected_output_slice , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode(self , seed , expected_slice , expected_slice_mps ):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )

        with torch.no_grad():
            sample = model(image ).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )

        assert torch_all_close(output_slice , expected_output_slice , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self , seed , expected_slice ):
        """simple docstring"""
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            sample = model.decode(encoding ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )

        assert torch_all_close(output_slice , expected_output_slice , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self , seed , expected_slice ):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )

        with torch.no_grad():
            sample = model.decode(encoding ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )

        assert torch_all_close(output_slice , expected_output_slice , atol=5E-3 )
    @parameterized.expand([(13,), (16,), (27,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self , seed ):
        """simple docstring"""
        model = self.get_sd_vae_model(fp16=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fp16=True )

        with torch.no_grad():
            sample = model.decode(encoding ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(sample , sample_a , atol=1E-1 )
    @parameterized.expand([(13,), (16,), (37,)] )
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self , seed ):
        """simple docstring"""
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )

        with torch.no_grad():
            sample = model.decode(encoding ).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample

        assert list(sample.shape ) == [3, 3, 512, 512]

        assert torch_all_close(sample , sample_a , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self , seed , expected_slice ):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )

        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )

        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )

        tolerance = 3E-3 if torch_device != '''mps''' else 1E-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
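# Illustrative sketch (not part of the original file): round-tripping an image
# tensor through the VAE exercised by the tests above.
#
#     vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#     latents = vae.encode(pixel_values).latent_dist.sample()   # (B, 4, H/8, W/8)
#     recon = vae.decode(latents).sample                        # (B, 3, H, W)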
| 365 |
from pathlib import Path
import fire
def minify(src_dir: str , dest_dir: str , n: int ) -> None:
    """Write the first `n` lines of each file in `src_dir` to `dest_dir` under the same name."""
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open('''w''' ).write('''\n'''.join(new ) )
if __name__ == "__main__":
fire.Fire(minify)
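# Illustrative CLI usage via fire (script filename and paths assumed for the example):
#
#     python minify.py /data/wmt_en_ro /data/wmt_en_ro_100 100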
| 238 | 0 |
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}

RESOURCE_FILES_NAMES = {
    'sentencepiece_model_file': 'sentencepiece.bpe.model',
    'vocab_file': 'vocab.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
    },
    'sentencepiece_model_file': {
        'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
        'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ernie-m-base': 5_14,
    'ernie-m-large': 5_14,
}

PRETRAINED_INIT_CONFIGURATION = {
    'ernie-m-base': {'do_lower_case': False},
    'ernie-m-large': {'do_lower_case': False},
}
class ErnieMTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''

    model_input_names = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES
    def __init__( self , sentencepiece_model_ckpt , vocab_file=None , do_lower_case=False , encoding="utf8" , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs = None , **kwargs , ):
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , vocab_file=vocab_file , encoding=encoding , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(sentencepiece_model_ckpt )

        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file )
        else:
            self.vocab = {self.sp_model.id_to_piece(id ): id for id in range(self.sp_model.get_piece_size() )}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}
    def get_offset_mapping( self , text ):
        """simple docstring"""
        if text is None:
            return None

        split_tokens = self.tokenize(text )
        normalized_text, char_mapping = "", []

        for i, ch in enumerate(text ):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch )
            else:
                ch = unicodedata.normalize('NFKC' , ch )
            if self.is_whitespace(ch ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch ) )

        text, token_mapping, offset = normalized_text, [], 0

        if self.do_lower_case:
            text = text.lower()

        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token ) + offset
            end = start + len(token )

            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            offset = end
        return token_mapping
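    # Illustrative example (comment only, not part of the original file): for a
    # text like "Hello world", `get_offset_mapping` returns one (start, end)
    # character span per produced token, e.g. [(0, 5), (6, 11)] if the text is
    # tokenized into two whole-word pieces.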
    @property
    def vocab_size( self ):
        """simple docstring"""
        return len(self.vocab )

    def get_vocab( self ):
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        """simple docstring"""
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )

    def clean_text( self , text ):
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
    def _tokenize( self , text , enable_sampling=False , nbest_size=6_4 , alpha=0.1 ):
        """simple docstring"""
        if self.sp_model_kwargs.get('enable_sampling' ) is True:
            enable_sampling = True
        if self.sp_model_kwargs.get('alpha' ) is not None:
            alpha = self.sp_model_kwargs.get('alpha' )
        if self.sp_model_kwargs.get('nbest_size' ) is not None:
            nbest_size = self.sp_model_kwargs.get('nbest_size' )

        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text )
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
        new_pieces = []
        for pi, piece in enumerate(pieces ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE )
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(chunk )
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lst_i = i
            if len(piece ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string

    def convert_ids_to_string( self , ids ):
        """simple docstring"""
        tokens = self.convert_ids_to_tokens(ids )
        out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
        return out_string

    def _convert_token_to_id( self , token ):
        """simple docstring"""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.reverse_vocab.get(index , self.unk_token )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens( self , offset_mapping_0 , offset_mapping_1=None ):
        """simple docstring"""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]

        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        """simple docstring"""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0 ) + 2) * [0]

        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0 ) + 1) + [1] * (len(token_ids_1 ) + 3)
    def is_ch_char( self , char ):
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha( self , char ):
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct( self , char ):
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace( self , char ):
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char ) == 1:
            cat = unicodedata.category(char )
            if cat == "Zs":
                return True
        return False
    def load_vocab( self , filepath ):
        """simple docstring"""
        token_to_idx = {}
        with io.open(filepath , 'r' , encoding='utf-8' ) as f:
            for index, line in enumerate(f ):
                token = line.rstrip('\n' )
                token_to_idx[token] = int(index )

        return token_to_idx

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(token + '\n' )
                index += 1
        tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model' )
        with open(tokenizer_model_file , 'wb' ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
        return (vocab_file,)
| 286 |
"""simple docstring"""
import os
def solution():
    """Return the first ten digits of the sum of the numbers listed in `num.txt`."""
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]


if __name__ == "__main__":
    print(solution())
| 286 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 79 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str ) -> bytes:
    '''simple docstring'''
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url ).json()[0]["urls"][0]["src"]
    return requests.get(video_url ).content
if __name__ == "__main__":
    url = input('''Enter Video/IGTV url: ''').strip()
    file_name = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''')
| 79 | 1 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase_ ( lowerCAmelCase__ : str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
lowerCAmelCase_ : List[Any] = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
lowerCAmelCase_ : str = f"{olid} is not a valid Open Library olid"
raise ValueError(snake_case_ )
return requests.get(f"https://openlibrary.org/{new_olid}.json" ).json()
def UpperCamelCase_ ( lowerCAmelCase__ : dict ) -> dict:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
lowerCAmelCase_ : List[Any] = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCAmelCase_ : int = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
lowerCAmelCase_ : Optional[Any] = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case_ , snake_case_ ):
lowerCAmelCase_ : Optional[Any] = ', '.join(snake_case_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(f'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(f'\nSearching Open Library for ISBN: {isbn}...\n')
try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print("""\n""".join(f'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'Sorry, there are no results for ISBN: {isbn}.')
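# Illustrative output sketch (not part of the original file; actual values come
# from the live Open Library API):
#
#     summarize_book(get_openlibrary_data("isbn/0140328726"))
#     # -> {"Title": "Fantastic Mr Fox", "Authors": "Roald Dahl", ...}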
| 224 |
"""simple docstring"""
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
| 126 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    def __init__( self , vocab_file , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.sp_model )

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
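# --- added sketch (not part of the original class) -----------------------------
# Standalone version of the text normalization the tokenizer above performs
# before handing text to SentencePiece (whitespace collapsing, quote
# normalization, optional accent stripping, optional lowercasing):
import unicodedata


def preprocess_text(text, remove_space=True, keep_accents=False, do_lower_case=False):
    out = " ".join(text.strip().split()) if remove_space else text
    out = out.replace("``", '"').replace("''", '"')
    if not keep_accents:
        out = unicodedata.normalize("NFKD", out)
        out = "".join(c for c in out if not unicodedata.combining(c))
    if do_lower_case:
        out = out.lower()
    return out


print(preprocess_text("  Héllo   ``world''  ", do_lower_case=True))  # hello "world"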
| 360 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
A_ = logging.get_logger(__name__)
A_ = {'''vocab_file''': '''spiece.model'''}
A_ = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
}
}
A_ = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
# Segments (not really needed)
A_ = 0
A_ = 1
A_ = 2
A_ = 3
A_ = 4
class __SCREAMING_SNAKE_CASE ( UpperCamelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = 'left'
def __init__( self : Dict , snake_case : int , snake_case : List[Any]=False , snake_case : List[str]=True , snake_case : Dict=False , snake_case : Optional[Any]="<s>" , snake_case : List[str]="</s>" , snake_case : Tuple="<unk>" , snake_case : Tuple="<sep>" , snake_case : Union[str, Any]="<pad>" , snake_case : Dict="<cls>" , snake_case : Optional[Any]="<mask>" , snake_case : Optional[int]=["<eop>", "<eod>"] , snake_case : Optional[Dict[str, Any]] = None , **snake_case : Dict , ):
'''simple docstring'''
A__ : Optional[int] = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
A__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case , remove_space=snake_case , keep_accents=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , additional_special_tokens=snake_case , sp_model_kwargs=self.sp_model_kwargs , **snake_case , )
A__ : str = 3
A__ : str = do_lower_case
A__ : Optional[Any] = remove_space
A__ : List[Any] = keep_accents
A__ : Union[str, Any] = vocab_file
A__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : int = {self.convert_ids_to_tokens(snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
'''simple docstring'''
A__ : int = self.__dict__.copy()
A__ : int = None
return state
def __setstate__( self : Tuple , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
A__ : Optional[int] = {}
A__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCamelCase ( self : List[str] , snake_case : Optional[Any] ):
'''simple docstring'''
if self.remove_space:
A__ : Optional[Any] = """ """.join(inputs.strip().split() )
else:
A__ : Dict = inputs
A__ : str = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
A__ : Any = unicodedata.normalize("""NFKD""" , snake_case )
A__ : Optional[int] = """""".join([c for c in outputs if not unicodedata.combining(snake_case )] )
if self.do_lower_case:
A__ : Any = outputs.lower()
return outputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str ):
'''simple docstring'''
A__ : Dict = self.preprocess_text(snake_case )
A__ : Dict = self.sp_model.encode(snake_case , out_type=snake_case )
A__ : Optional[int] = []
for piece in pieces:
if len(snake_case ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
A__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(snake_case , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
A__ : int = cur_pieces[1:]
else:
A__ : Any = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(snake_case )
else:
new_pieces.append(snake_case )
return new_pieces
def _UpperCamelCase ( self : List[str] , snake_case : Tuple ):
'''simple docstring'''
return self.sp_model.PieceToId(snake_case )
def _UpperCamelCase ( self : List[str] , snake_case : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(snake_case )
def _UpperCamelCase ( self : Optional[int] , snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = """""".join(snake_case ).replace(snake_case , """ """ ).strip()
return out_string
def _UpperCamelCase ( self : int , snake_case : List[int] , snake_case : bool = False , snake_case : bool = None , snake_case : bool = True , **snake_case : Union[str, Any] , ):
'''simple docstring'''
A__ : List[str] = kwargs.pop("""use_source_tokenizer""" , snake_case )
A__ : Any = self.convert_ids_to_tokens(snake_case , skip_special_tokens=snake_case )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A__ : Any = []
A__ : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
A__ : str = []
sub_texts.append(snake_case )
else:
current_sub_text.append(snake_case )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(snake_case ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
A__ : Dict = """""".join(snake_case )
A__ : int = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A__ : Tuple = self.clean_up_tokenization(snake_case )
return clean_text
else:
return text
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Tuple = [self.sep_token_id]
A__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _UpperCamelCase ( self : Dict , snake_case : List[int] , snake_case : Optional[List[int]] = None , snake_case : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is not None:
return ([0] * len(snake_case )) + [1] + ([0] * len(snake_case )) + [1, 1]
return ([0] * len(snake_case )) + [1, 1]
def _UpperCamelCase ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ):
'''simple docstring'''
A__ : Any = [self.sep_token_id]
A__ : int = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _UpperCamelCase ( self : Optional[Any] , snake_case : str , snake_case : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(snake_case ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A__ : List[Any] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case , """wb""" ) as fi:
A__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case )
return (out_vocab_file,)
| 296 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase_ : Union[str, Any] = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "xlm-roberta"
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-1_2 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
a =vocab_size
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =hidden_act
a =intermediate_size
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =max_position_embeddings
a =type_vocab_size
a =initializer_range
a =layer_norm_eps
a =position_embedding_type
a =use_cache
a =classifier_dropout
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
a ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
a ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] ) | 81 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : Any = {
"""microsoft/conditional-detr-resnet-50""": (
"""https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"""
),
}
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = "conditional_detr"
__lowerCAmelCase = ["past_key_values"]
__lowerCAmelCase = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=300 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.02 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=2 , __A=5 , __A=2 , __A=1 , __A=1 , __A=2 , __A=5 , __A=2 , __A=0.25 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
a =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(__A , __A ):
a =backbone_config.get('''model_type''' )
a =CONFIG_MAPPING[backbone_model_type]
a =config_class.from_dict(__A )
a =use_timm_backbone
a =backbone_config
a =num_channels
a =num_queries
a =d_model
a =encoder_ffn_dim
a =encoder_layers
a =encoder_attention_heads
a =decoder_ffn_dim
a =decoder_layers
a =decoder_attention_heads
a =dropout
a =attention_dropout
a =activation_dropout
a =activation_function
a =init_std
a =init_xavier_std
a =encoder_layerdrop
a =decoder_layerdrop
a =encoder_layers
a =auxiliary_loss
a =position_embedding_type
a =backbone
a =use_pretrained_backbone
a =dilation
# Hungarian matcher
a =class_cost
a =bbox_cost
a =giou_cost
# Loss coefficients
a =mask_loss_coefficient
a =dice_loss_coefficient
a =cls_loss_coefficient
a =bbox_loss_coefficient
a =giou_loss_coefficient
a =focal_alpha
super().__init__(is_encoder_decoder=__A , **__A )
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.encoder_attention_heads
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return self.d_model
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a =copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
a =self.backbone_config.to_dict()
a =self.__class__.model_type
return output
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def SCREAMING_SNAKE_CASE ( self ) -> float:
return 1E-5
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
return 12 | 81 | 1 |
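# --- added sketch (not part of the original rows) ------------------------------
# What the OnnxConfig `inputs` mappings above encode: for ONNX export, every
# input tensor maps axis index -> symbolic dimension name for each dynamic
# (variable-sized) axis; axes left unnamed are exported with a fixed size.
from collections import OrderedDict

onnx_inputs = OrderedDict(
    [
        ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
        ("pixel_mask", {0: "batch"}),
    ]
)
for tensor_name, dynamic_axes in onnx_inputs.items():
    print(tensor_name, "->", dynamic_axes)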
'''simple docstring'''
import heapq
def _lowerCamelCase ( lowercase : dict ) -> set[int]:
_a = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(lowercase , [-1 * len(lowercase ), (key, value)] )
# chosen_vertices = set of chosen vertices
_a = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_a = heapq.heappop(lowercase )[1][0]
chosen_vertices.add(lowercase )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_a = elem[1][1].index(lowercase )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(lowercase )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ : int = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
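# --- added sketch (not part of the original snippet) ---------------------------
# Companion check: a set is a vertex cover iff every edge has at least one
# endpoint in it. greedy_min_vertex_cover mutates the adjacency lists of its
# argument, so the check below runs it on a deep copy of a fresh demo graph.
import copy


def is_vertex_cover(g: dict, cover: set) -> bool:
    return all(u in cover or v in cover for u in g for v in g[u])


fresh = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(is_vertex_cover(fresh, greedy_min_vertex_cover(copy.deepcopy(fresh))))  # True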
| 354 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase_ : Tuple = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def _lowerCamelCase ( lowercase : List[Any] ) -> Optional[int]:
_a = test_results.split(" " )
_a = 0
_a = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_a = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowercase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
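# --- added sketch (not part of the original script) ----------------------------
# Worked, self-contained version of the summary parsing above, for a pytest
# line such as "== 2 failed, 10 passed in 3.4s ==" (function name is ours):
def parse_pytest_summary(line):
    tokens = line.split(" ")
    failed = success = 0
    # time is the second-to-last token when the line is wrapped in '=' signs
    time_spent = tokens[-2] if "=" in tokens[-1] else tokens[-1]
    for i, tok in enumerate(tokens):
        if "failed" in tok:
            failed += int(tokens[i - 1])
        if "passed" in tok:
            success += int(tokens[i - 1])
    return failed, success, time_spent


print(parse_pytest_summary("== 2 failed, 10 passed in 3.4s =="))  # (2, 10, '3.4s')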
def _lowerCamelCase ( lowercase : str ) -> Optional[Any]:
_a = {}
_a = None
_a = False
for line in failures_short_lines.split("\n" ):
if re.search(r"_ \[doctest\]" , lowercase ):
_a = True
_a = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
_a = line
_a = False
return failures
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple , __a : str , __a : Dict ):
_a = title
_a = doc_test_results["time_spent"].split("," )[0]
_a = doc_test_results["success"]
_a = doc_test_results["failures"]
_a = self.n_success + self.n_failures
# Failures and success of the modeling tests
_a = doc_test_results
@property
def UpperCamelCase__ ( self : int ):
_a = [self._time_spent]
_a = 0
for time in time_spent:
_a = time.split(":" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__a ) == 1:
_a = [0, 0, time_parts[0]]
_a , _a , _a = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_a , _a , _a = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f'{int(__a )}h{int(__a )}m{int(__a )}s'
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
def UpperCamelCase__ ( self : str ):
_a = 40
_a = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(__a , __a )}
_a = ""
for category, failures in category_failures.items():
if len(__a ) == 0:
continue
if report != "":
report += "\n\n"
report += f'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__a )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'The following examples had failures:\n\n\n{report}\n',
},
}
@property
def UpperCamelCase__ ( self : List[str] ):
_a = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__a )
@staticmethod
def UpperCamelCase__ ( ):
_a = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
]
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(__a )} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=__a , )
def UpperCamelCase__ ( self : Tuple ):
print("Sending the following payload" )
print(json.dumps({"blocks": json.loads(self.payload )} ) )
_a = f'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else "All tests passed."
_a = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=__a , )
def UpperCamelCase__ ( self : Dict , __a : List[str] , __a : List[Any] , __a : Tuple , __a : int ):
_a = ""
for key, value in failures.items():
_a = value[:2_00] + " [Truncated]" if len(__a ) > 2_50 else value
failures_text += f'*{key}*\n_{value}_\n\n'
_a = job_name
_a = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
_a = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def UpperCamelCase__ ( self : str ):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made." )
_a = self.doc_test_results.pop("job_link" )
self.doc_test_results.pop("failures" )
self.doc_test_results.pop("success" )
self.doc_test_results.pop("time_spent" )
_a = sorted(self.doc_test_results.items() , key=lambda __a : __a[0] )  # fixed: the lambda parameter is __a, not an undefined t
for job, job_result in sorted_dict:
if len(job_result["failures"] ):
_a = f'*Num failures* :{len(job_result["failed"] )} \n'
_a = job_result["failures"]
_a = self.get_reply_blocks(__a , __a , __a , text=__a )
print("Sending the following reply" )
print(json.dumps({"blocks": blocks} ) )
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=f'Results for {job}' , blocks=__a , thread_ts=self.thread_ts["ts"] , )
time.sleep(1 )
def _lowerCamelCase ( ) -> Any:
_a = os.environ["GITHUB_RUN_ID"]
_a = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'
_a = requests.get(lowercase ).json()
_a = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
_a = math.ceil((result["total_count"] - 100) / 100 )
for i in range(lowercase ):
_a = requests.get(url + F'&page={i + 2}' ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , lowercase )
return {}
def _lowerCamelCase ( lowercase : str ) -> Dict:
_a = {}
if os.path.exists(lowercase ):
_a = os.listdir(lowercase )
for file in files:
try:
with open(os.path.join(lowercase , lowercase ) , encoding="utf-8" ) as f:
_a = f.read()
except UnicodeDecodeError as e:
raise ValueError(F'Could not open {os.path.join(lowercase , lowercase )}.' ) from e
return _artifact
def _lowerCamelCase ( ) -> str:
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Dict , __a : str ):
_a = name
_a = []
def __str__( self : List[str] ):
return self.name
def UpperCamelCase__ ( self : str , __a : str ):
self.paths.append({"name": self.name, "path": path} )
_a = {}
_a = filter(os.path.isdir , os.listdir() )
for directory in directories:
_a = directory
if artifact_name not in _available_artifacts:
_a = Artifact(lowercase )
_available_artifacts[artifact_name].add_path(lowercase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase_ : List[Any] = get_job_links()
lowerCAmelCase_ : Any = retrieve_available_artifacts()
lowerCAmelCase_ : List[str] = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase_ : Optional[Any] = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase_ : int = github_actions_job_links.get('run_doctests')
lowerCAmelCase_ : Union[str, Any] = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
lowerCAmelCase_ : List[str] = retrieve_artifact(artifact_path['name'])
if "stats" in artifact:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = handle_test_results(artifact['stats'])
lowerCAmelCase_ : List[str] = failed
lowerCAmelCase_ : Optional[Any] = success
lowerCAmelCase_ : Tuple = time_spent[1:-1] + ', '
lowerCAmelCase_ : List[Any] = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
lowerCAmelCase_ : int = line.replace('FAILED ', '')
lowerCAmelCase_ : Optional[int] = line.split()[0].replace('\n', '')
if "::" in line:
lowerCAmelCase_ , lowerCAmelCase_ : str = line.split('::')
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase_ : Union[str, Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase_ : List[str] = all_failures[test] if test in all_failures else 'N/A'
lowerCAmelCase_ : Optional[Any] = failure
break
lowerCAmelCase_ : Tuple = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 346 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score as fa_score  # fixed: sklearn exposes f1_score; the alias keeps the fa_score call sites below working
import datasets
UpperCamelCase__ = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
UpperCamelCase__ = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCamelCase__ = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def lowerCAmelCase_ ( __A, __A ) -> Dict:
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCAmelCase_ ( __A, __A ) -> List[str]:
'''simple docstring'''
UpperCAmelCase__ = simple_accuracy(__A, __A )
UpperCAmelCase__ = float(fa_score(y_true=__A, y_pred=__A ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCAmelCase_ ( __A, __A ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = np.array(__A )
UpperCAmelCase__ = np.array(__A )
UpperCAmelCase__ = en_sentvecs.shape[0]
# mean centering
UpperCAmelCase__ = en_sentvecs - np.mean(__A, axis=0 )
UpperCAmelCase__ = in_sentvecs - np.mean(__A, axis=0 )
UpperCAmelCase__ = cdist(__A, __A, "cosine" )
UpperCAmelCase__ = np.array(range(__A ) )
UpperCAmelCase__ = sim.argsort(axis=1 )[:, :10]
UpperCAmelCase__ = np.any(preds == actual[:, None], axis=1 )
return float(matches.mean() )
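# --- added mini-example (not part of the original metric) ----------------------
# The precision@10 computation above: mean-center both sets of sentence
# vectors, rank Indic candidates for each English vector by cosine distance,
# and count how often the true pair lands in the top 10. With only 3 toy
# candidates the score is trivially 1.0:
import numpy as np
from scipy.spatial.distance import cdist

en = np.eye(3)            # toy English sentence vectors
ind = np.eye(3) + 0.01    # toy aligned Indic sentence vectors
sim = cdist(en - en.mean(axis=0), ind - ind.mean(axis=0), "cosine")
preds = sim.argsort(axis=1)[:, :10]
print(float(np.any(preds == np.arange(3)[:, None], axis=1).mean()))  # 1.0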
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowercase_ (self : Optional[Any] ) -> List[str]:
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def lowercase_ (self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(__UpperCAmelCase , __UpperCAmelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(__UpperCAmelCase , __UpperCAmelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(__UpperCAmelCase , __UpperCAmelCase )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 65 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 0 |
"""simple docstring"""
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class lowerCAmelCase__ ( A_ ):
__a = """facebook/nllb-200-distilled-600M"""
__a = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
__a = """translator"""
__a = AutoTokenizer
__a = AutoModelForSeqaSeqLM
__a = LANGUAGE_CODES
__a = ["""text""", """text""", """text"""]
__a = ["""text"""]
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : Dict ):
if src_lang not in self.lang_to_code:
raise ValueError(f'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'''{tgt_lang} is not a supported language.''' )
_snake_case = self.lang_to_code[src_lang]
_snake_case = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCamelCase , return_tensors='''pt''' , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase )
def lowercase ( self : Tuple , _lowerCamelCase : Optional[Any] ):
return self.model.generate(**_lowerCamelCase )
def lowercase ( self : Optional[Any] , _lowerCamelCase : str ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCamelCase )
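# --- added usage sketch (hypothetical; not part of the original class) ---------
# PipelineTool subclasses like the one above are callable, with inputs in the
# declared order ["text", "text", "text"]; e.g. (downloads the ~600M model):
#   tool = TranslationTool()  # stand-in name for the mangled class above
#   tool("Bonjour, comment allez-vous ?", "French", "English")
# encode() maps the plain-English language names through LANGUAGE_CODES,
# forward() calls generate(), and decode() strips special tokens.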
| 362 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup  # fixed: the package is bs4
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : dict ) -> str:
_snake_case = BeautifulSoup(requests.get(__lowerCamelCase , params=__lowerCamelCase ).content , '''html.parser''' )
_snake_case = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
_snake_case = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
UpperCAmelCase__ = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 40 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def lowercase_ ( _lowerCamelCase: str ) -> Union[str, Any]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__lowerCamelCase : Optional[int] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("encoder" ):
__lowerCamelCase : Any = k.replace(".attn" , ".self_attn" )
__lowerCamelCase : Any = k.replace("norm1" , "self_attn_layer_norm" )
__lowerCamelCase : Union[str, Any] = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
__lowerCamelCase : List[Any] = k.replace("norm1" , "self_attn_layer_norm" )
__lowerCamelCase : Optional[Any] = k.replace("norm2" , "encoder_attn_layer_norm" )
__lowerCamelCase : Union[str, Any] = k.replace("norm3" , "final_layer_norm" )
return k
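# --- added note (not part of the original function) ----------------------------
# Intended effect of the PATTERNS pass above on a ParlAI encoder key:
#   "encoder.layers.0.attention.q_lin.weight"
#     -> "encoder.layers.0.self_attn.q_proj.weight"
# (attention -> attn, q_lin -> q_proj, then ".attn" -> ".self_attn" because
# the key starts with "encoder")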
def lowercase_ ( _lowerCamelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase : Optional[int] = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
__lowerCamelCase : str = sd.pop(_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
__lowerCamelCase : Dict = v
__A = ['''START''']
@torch.no_grad()
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: int , _lowerCamelCase: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase : int = torch.load(_lowerCamelCase , map_location="cpu" )
__lowerCamelCase : Union[str, Any] = model["model"]
__lowerCamelCase : Optional[Any] = BlenderbotConfig.from_json_file(_lowerCamelCase )
__lowerCamelCase : Tuple = BlenderbotForConditionalGeneration(_lowerCamelCase )
__lowerCamelCase : Dict = m.model.state_dict().keys()
__lowerCamelCase : Tuple = []
__lowerCamelCase : str = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__lowerCamelCase : Any = rename_state_dict_key(_lowerCamelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__lowerCamelCase : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowerCamelCase )
m.model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
m.half()
m.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json) | 135 | """simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class _snake_case ( a__ ):
snake_case__ = "camembert"
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=30522 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Tuple=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Tuple=1E-12 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , UpperCAmelCase : str="absolute" , UpperCAmelCase : Dict=True , UpperCAmelCase : int=None , **UpperCAmelCase : List[str] , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : int = hidden_act
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Dict = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : str = layer_norm_eps
__lowerCamelCase : List[Any] = position_embedding_type
__lowerCamelCase : Dict = use_cache
__lowerCamelCase : List[Any] = classifier_dropout
class _snake_case ( a__ ):
@property
def lowerCamelCase__ ( self : int ):
if self.task == "multiple-choice":
__lowerCamelCase : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
__lowerCamelCase : Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] ) | 135 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
UpperCamelCase : Dict = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
UpperCamelCase : Optional[int] = BASE_URL + '/user'
# https://github.com/settings/tokens
UpperCamelCase : Union[str, Any] = os.environ.get("""USER_TOKEN""", """""")
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : Any = {
'Authorization': F"""token {auth_token}""",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(snake_case_ , headers=snake_case_ ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("""\'USER_TOKEN\' field cannot be empty.""")
| 370 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase :Tuple = '''sew-d'''
def __init__( self , lowerCAmelCase_=32 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_=2 , lowerCAmelCase_=5_12 , lowerCAmelCase_=2_56 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=("p2c", "c2p") , lowerCAmelCase_="layer_norm" , lowerCAmelCase_="gelu_python" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-7 , lowerCAmelCase_=1E-5 , lowerCAmelCase_="group" , lowerCAmelCase_="gelu" , lowerCAmelCase_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_=False , lowerCAmelCase_=1_28 , lowerCAmelCase_=16 , lowerCAmelCase_=True , lowerCAmelCase_=0.05 , lowerCAmelCase_=10 , lowerCAmelCase_=2 , lowerCAmelCase_=0.0 , lowerCAmelCase_=10 , lowerCAmelCase_=0 , lowerCAmelCase_="mean" , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=2_56 , lowerCAmelCase_=0 , lowerCAmelCase_=1 , lowerCAmelCase_=2 , **lowerCAmelCase_ , ) -> List[Any]:
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
_A = hidden_size
_A = feat_extract_norm
_A = feat_extract_activation
_A = list(lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
_A = list(lowerCAmelCase_ )
_A = conv_bias
_A = num_conv_pos_embeddings
_A = num_conv_pos_embedding_groups
_A = len(self.conv_dim )
_A = num_hidden_layers
_A = intermediate_size
_A = squeeze_factor
_A = max_position_embeddings
_A = position_buckets
_A = share_att_key
_A = relative_attention
_A = norm_rel_ebd
_A = list(lowerCAmelCase_ )
_A = hidden_act
_A = num_attention_heads
_A = hidden_dropout
_A = attention_dropout
_A = activation_dropout
_A = feat_proj_dropout
_A = final_dropout
_A = layer_norm_eps
_A = feature_layer_norm_eps
_A = initializer_range
_A = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_A = apply_spec_augment
_A = mask_time_prob
_A = mask_time_length
_A = mask_time_min_masks
_A = mask_feature_prob
_A = mask_feature_length
_A = mask_feature_min_masks
# ctc loss
_A = ctc_loss_reduction
_A = ctc_zero_infinity
# sequence classification
_A = use_weighted_layer_sum
_A = classifier_proj_size
@property
def UpperCAmelCase ( self ) -> str:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 180 | import torch
from torch import nn
class a ( nn.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1 , lowerCAmelCase_=False ) -> Any:
super().__init__()
_A = n_token
_A = d_embed
_A = d_proj
_A = cutoffs + [n_token]
_A = [0] + self.cutoffs
_A = div_val
_A = self.cutoffs[0]
_A = len(self.cutoffs ) - 1
_A = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_A = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
_A = nn.Parameter(torch.zeros(self.n_clusters ) )
_A = nn.ModuleList()
_A = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
else:
self.out_projs.append(lowerCAmelCase_ )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ ) )
else:
for i in range(len(self.cutoffs ) ):
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(lowerCAmelCase_ , lowerCAmelCase_ ) ) )
self.out_layers.append(nn.Linear(lowerCAmelCase_ , r_idx - l_idx ) )
_A = keep_order
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
if proj is None:
_A = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_A = nn.functional.linear(lowerCAmelCase_ , proj.t().contiguous() )
_A = nn.functional.linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=False ) -> List[Any]:
if labels is not None:
# Shift so that tokens < n predict n
_A = hidden[..., :-1, :].contiguous()
_A = labels[..., 1:].contiguous()
_A = hidden.view(-1 , hidden.size(-1 ) )
_A = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_A = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
_A = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
_A = labels != -1_00
_A = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
_A = (
-nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
_A , _A = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.out_layers[0].weight[l_idx:r_idx]
_A = self.out_layers[0].bias[l_idx:r_idx]
else:
_A = self.out_layers[i].weight
_A = self.out_layers[i].bias
if i == 0:
_A = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_A = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
_A , _A , _A = weights[0], biases[0], self.out_projs[0]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
if labels is None:
_A = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_A = torch.zeros_like(lowerCAmelCase_ , dtype=hidden.dtype , device=hidden.device )
_A = 0
_A = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
_A , _A = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_A = (labels >= l_idx) & (labels < r_idx)
_A = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_A = labels.index_select(0 , lowerCAmelCase_ ) - l_idx
_A = head_logprob.index_select(0 , lowerCAmelCase_ )
_A = hidden.index_select(0 , lowerCAmelCase_ )
else:
_A = hidden
if i == 0:
if labels is not None:
_A = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
_A = head_logprob[:, : self.cutoffs[0]]
else:
_A , _A , _A = weights[i], biases[i], self.out_projs[i]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_A = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
_A = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_A = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , lowerCAmelCase_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int:
if self.n_clusters == 0:
_A = self._compute_logit(lowerCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(lowerCAmelCase_ , dim=-1 )
else:
# construct weights and biases
_A , _A = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_A , _A = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_A = self.out_layers[0].weight[l_idx:r_idx]
_A = self.out_layers[0].bias[l_idx:r_idx]
else:
_A = self.out_layers[i].weight
_A = self.out_layers[i].bias
if i == 0:
_A = torch.cat([weight_i, self.cluster_weight] , dim=0 )
_A = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(lowerCAmelCase_ )
biases.append(lowerCAmelCase_ )
_A , _A , _A = weights[0], biases[0], self.out_projs[0]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = [0] + self.cutoffs
for i in range(len(lowerCAmelCase_ ) - 1 ):
_A , _A = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_A = head_logprob[:, : self.cutoffs[0]]
else:
_A , _A , _A = weights[i], biases[i], self.out_projs[i]
_A = self._compute_logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_A = nn.functional.log_softmax(lowerCAmelCase_ , dim=1 )
_A = head_logprob[:, -i] + tail_logprob_i
_A = logprob_i
return out
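# A minimal sketch of the same adaptive-softmax idea using PyTorch's built-in
# nn.AdaptiveLogSoftmaxWithLoss (illustrative only -- the class above is this
# file's own Transformer-XL-style implementation, and the sizes here are made up):
if __name__ == "__main__":
    asm = nn.AdaptiveLogSoftmaxWithLoss(in_features=64 , n_classes=1000 , cutoffs=[100, 500] , div_value=2.0 )
    hidden_states = torch.randn(16 , 64 )       # (batch, in_features)
    targets = torch.randint(0 , 1000 , (16,) )  # target token ids
    output = asm(hidden_states , targets )      # named tuple with .output (per-sample log-prob) and .loss (mean NLL)
    print(output.loss )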
| 180 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__UpperCamelCase = get_logger(__name__)
__UpperCamelCase = r'''\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'''
class UpperCamelCase :
@add_start_docstrings(_a)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class UpperCamelCase :
@add_start_docstrings(_a)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.')
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
@add_start_docstrings(_a)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> Dict:
for processor in self:
snake_case_ = inspect.signature(processor.__call__).parameters
if len(_a) > 3:
if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys())} for '
f'{processor.__class__} are passed to the logits processor.')
snake_case_ = processor(_a, _a, _a, **_a)
else:
snake_case_ = processor(_a, _a, _a)
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__) -> Dict:
if not isinstance(_a, _a) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
snake_case_ = temperature
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
snake_case_ = scores / self.temperature
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = -float('Inf'), lowerCAmelCase__ = 1) -> Optional[int]:
if not isinstance(_a, _a) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
if not isinstance(_a, _a) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
snake_case_ = top_p
snake_case_ = filter_value
snake_case_ = min_tokens_to_keep
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> int:
snake_case_ , snake_case_ = lax.top_k(_a, scores.shape[-1])
snake_case_ = jnp.full_like(_a, self.filter_value)
snake_case_ = jax.nn.softmax(_a, axis=-1).cumsum(axis=-1)
snake_case_ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
snake_case_ = jnp.roll(_a, 1)
score_mask |= score_mask.at[:, 0].set(_a)
# min tokens to keep
snake_case_ = score_mask.at[:, : self.min_tokens_to_keep].set(_a)
snake_case_ = jnp.where(_a, _a, _a)
snake_case_ = jax.lax.sort_key_val(_a, _a)[-1]
return next_scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = -float('Inf'), lowerCAmelCase__ = 1) -> Any:
if not isinstance(_a, _a) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}')
snake_case_ = max(_a, _a)
snake_case_ = filter_value
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str:
snake_case_ , snake_case_ = scores.shape
snake_case_ = jnp.full(batch_size * vocab_size, self.filter_value)
snake_case_ = min(self.top_k, scores.shape[-1]) # Safety check
snake_case_ , snake_case_ = lax.top_k(_a, _a)
snake_case_ = jnp.broadcast_to((jnp.arange(_a) * vocab_size)[:, None], (batch_size, topk)).flatten()
snake_case_ = topk_scores.flatten()
snake_case_ = topk_indices.flatten() + shift
snake_case_ = next_scores_flat.at[topk_indices_flat].set(_a)
snake_case_ = next_scores_flat.reshape(_a, _a)
return next_scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__) -> Any:
snake_case_ = bos_token_id
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = jnp.full(scores.shape, -float('inf'))
snake_case_ = 1 - jnp.bool_(cur_len - 1)
snake_case_ = jnp.where(_a, new_scores.at[:, self.bos_token_id].set(0), _a)
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = max_length
snake_case_ = eos_token_id
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = jnp.full(scores.shape, -float('inf'))
snake_case_ = 1 - jnp.bool_(cur_len - self.max_length + 1)
snake_case_ = jnp.where(_a, new_scores.at[:, self.eos_token_id].set(0), _a)
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
if not isinstance(_a, _a) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}')
if not isinstance(_a, _a) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}')
snake_case_ = min_length
snake_case_ = eos_token_id
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]:
# create boolean flag to decide if min length penalty should be applied
snake_case_ = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
snake_case_ = jnp.where(_a, scores.at[:, self.eos_token_id].set(-float('inf')), _a)
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> List[str]:
snake_case_ = list(_a)
snake_case_ = begin_index
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict:
snake_case_ = 1 - jnp.bool_(cur_len - self.begin_index)
snake_case_ = jnp.where(_a, scores.at[:, self.begin_suppress_tokens].set(-float('inf')), _a)
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__) -> List[str]:
snake_case_ = list(_a)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]:
snake_case_ = scores.at[..., self.suppress_tokens].set(-float('inf'))
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__) -> str:
snake_case_ = dict(_a)
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
snake_case_ = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.intaa) * -1
for index, token in force_token_map.items():
if token is not None:
snake_case_ = force_token_array.at[index].set(_a)
snake_case_ = jnp.intaa(_a)
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any:
def _force_token(lowerCAmelCase__):
snake_case_ = scores.shape[0]
snake_case_ = self.force_token_array[generation_idx]
snake_case_ = jnp.ones_like(_a, dtype=scores.dtype) * -float('inf')
snake_case_ = jnp.zeros((batch_size, 1), dtype=scores.dtype)
snake_case_ = lax.dynamic_update_slice(_a, _a, (0, current_token))
return new_scores
snake_case_ = lax.cond(
cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(
self.force_token_array[cur_len] >= 0, lambda: _force_token(_a), lambda: scores, ), )
return scores
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
def __init__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> int:
snake_case_ = generate_config.eos_token_id
snake_case_ = generate_config.no_timestamps_token_id
snake_case_ = generate_config.no_timestamps_token_id + 1
snake_case_ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a, 'max_initial_timestamp_index'):
snake_case_ = generate_config.max_initial_timestamp_index
else:
snake_case_ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
snake_case_ = model_config.vocab_size
def __call__( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> int:
# suppress <|notimestamps|> which is handled by without_timestamps
snake_case_ = scores.at[:, self.no_timestamps_token_id].set(-float('inf'))
def handle_pairs(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = jnp.where((cur_len - self.begin_index) >= 1, _a, _a)
snake_case_ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, _a, )
snake_case_ = jnp.where((cur_len - self.begin_index) < 2, _a, _a)
snake_case_ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin, _a, _a, )
return jnp.where(
_a, jnp.where(
penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float('inf')), scores_k.at[: self.eos_token_id].set(-float('inf')), ), _a, )
snake_case_ = jax.vmap(_a)(_a, _a)
snake_case_ = jnp.where(cur_len == self.begin_index, _a, _a)
snake_case_ = jnp.where(
self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, _a, )
snake_case_ = self.timestamp_begin + self.max_initial_timestamp_index
snake_case_ = jnp.where(
_a, scores.at[:, last_allowed + 1 :].set(-float('inf')), _a, )
# if sum of probability over timestamps is above any other token, sample timestamp
snake_case_ = jax.nn.log_softmax(_a, axis=-1)
def handle_cumulative_probs(lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
snake_case_ = jnp.max(logprobs_k[: self.timestamp_begin])
return jnp.where(
timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float('inf')), _a, )
snake_case_ = jax.vmap(_a)(_a, _a)
return scores
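# Self-contained sketch of how such logits processors are chained at decode time
# (illustrative; the helper names below are assumptions, not the obfuscated
# classes above, which all share the same placeholder name):
def _temperature_warp(scores , temperature=0.7 ):
    # divide every logit by the temperature before sampling
    return scores / temperature

def _top_k_warp(scores , k=3 , filter_value=-float('inf') ):
    # keep only the k largest logits per row, mask the rest
    topk_scores , _ = lax.top_k(scores , k )
    cutoff = topk_scores[: , -1 , None]  # k-th largest score per row
    return jnp.where(scores < cutoff , filter_value , scores )

if __name__ == "__main__":
    demo_scores = jnp.array([[1.0 , 3.0 , 0.5 , 2.0 , -1.0 , 4.0]] )
    for warp in (_temperature_warp , _top_k_warp ):
        demo_scores = warp(demo_scores )
    print(demo_scores )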
| 361 | """simple docstring"""
from __future__ import annotations
import math
def minimax( depth , node_index , is_max , scores , height ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
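# Worked check (derived by hand from the scores above): the depth-2 maxima are
# 90, 33, 65 and 34423, the depth-1 minima are 33 and 65, and the maximizing
# root takes the larger of those, so the script prints "Optimal value : 65".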
| 312 | 0 |
"""simple docstring"""
class Node:

    def __init__( self , val) -> None:
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert( self , val) -> None:
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val
def inorder( root, res ) -> None:
    """simple docstring"""
    if root:
        inorder(root.left, res )
        res.append(root.val )
        inorder(root.right, res )


def tree_sort( arr ) -> list:
    """simple docstring"""
    if len(arr ) == 0:
        return arr
    root = Node(arr[0] )
    for i in range(1, len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    res = []
    inorder(root, res )
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
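    # Expected output: [1, 2, 3, 9, 10, 13, 14] -- an inorder traversal of a
    # binary search tree visits the stored values in ascending order, which is
    # exactly what makes tree sort work.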
| 194 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
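# Minimal sketch of the lazy-module pattern this __init__ relies on
# (illustrative; the real _LazyModule lives in transformers.utils and is more
# elaborate). Attribute access triggers the actual submodule import:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the submodule that exports `attr` only when it is first used
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")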
| 39 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowerCamelCase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
a_ : List[Any] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a_ : Optional[int] = (
{
"""feature-extraction""": TFMobileBertModel,
"""fill-mask""": TFMobileBertForMaskedLM,
"""question-answering""": TFMobileBertForQuestionAnswering,
"""text-classification""": TFMobileBertForSequenceClassification,
"""token-classification""": TFMobileBertForTokenClassification,
"""zero-shot""": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ : Union[str, Any] = False
a_ : Optional[Any] = False
def lowerCamelCase ( self : Optional[int] , a_ : Optional[Any] , a_ : Tuple , a_ : int=False ):
lowerCAmelCase_ : Union[str, Any] = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class in get_values(a_ ):
lowerCAmelCase_ : Optional[int] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class __lowerCamelCase ( A__ ):
'''simple docstring'''
def __init__( self : int , a_ : Tuple , a_ : Tuple=13 , a_ : Dict=7 , a_ : Tuple=True , a_ : Optional[Any]=True , a_ : List[Any]=True , a_ : Any=True , a_ : Any=99 , a_ : List[Any]=32 , a_ : int=32 , a_ : int=2 , a_ : Union[str, Any]=4 , a_ : int=37 , a_ : Tuple="gelu" , a_ : Dict=0.1 , a_ : str=0.1 , a_ : Union[str, Any]=5_12 , a_ : List[str]=16 , a_ : List[Any]=2 , a_ : int=0.02 , a_ : List[Any]=3 , a_ : int=4 , a_ : Any=None , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : Any = seq_length
lowerCAmelCase_ : Any = is_training
lowerCAmelCase_ : int = use_input_mask
lowerCAmelCase_ : Tuple = use_token_type_ids
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : int = num_hidden_layers
lowerCAmelCase_ : Any = num_attention_heads
lowerCAmelCase_ : Dict = intermediate_size
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Tuple = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : List[Any] = max_position_embeddings
lowerCAmelCase_ : int = type_vocab_size
lowerCAmelCase_ : int = type_sequence_label_size
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Union[str, Any] = num_labels
lowerCAmelCase_ : Optional[Any] = num_choices
lowerCAmelCase_ : List[str] = scope
lowerCAmelCase_ : Tuple = embedding_size
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[str] = None
if self.use_input_mask:
lowerCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Dict = None
lowerCAmelCase_ : Any = None
if self.use_labels:
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : str = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : List[str] , a_ : int , a_ : List[str] , a_ : Dict , a_ : Optional[Any] , a_ : Optional[Any] , a_ : List[str] , a_ : Dict ):
lowerCAmelCase_ : Optional[int] = TFMobileBertModel(config=a_ )
lowerCAmelCase_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : str = model(a_ )
lowerCAmelCase_ : Dict = [input_ids, input_mask]
lowerCAmelCase_ : List[str] = model(a_ )
lowerCAmelCase_ : Union[str, Any] = model(a_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase ( self : Dict , a_ : Optional[Any] , a_ : List[Any] , a_ : Union[str, Any] , a_ : Optional[Any] , a_ : Dict , a_ : Any , a_ : List[Any] ):
lowerCAmelCase_ : Tuple = TFMobileBertForMaskedLM(config=a_ )
lowerCAmelCase_ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : Any = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self : Tuple , a_ : List[Any] , a_ : Any , a_ : List[Any] , a_ : List[Any] , a_ : Any , a_ : Optional[int] , a_ : str ):
lowerCAmelCase_ : Dict = TFMobileBertForNextSentencePrediction(config=a_ )
lowerCAmelCase_ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : Union[str, Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCamelCase ( self : Any , a_ : List[str] , a_ : str , a_ : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[int] , a_ : int ):
lowerCAmelCase_ : Optional[int] = TFMobileBertForPreTraining(config=a_ )
lowerCAmelCase_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : Tuple = model(a_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCamelCase ( self : Any , a_ : Optional[Any] , a_ : List[str] , a_ : List[str] , a_ : int , a_ : List[Any] , a_ : Tuple , a_ : Optional[int] ):
lowerCAmelCase_ : str = self.num_labels
lowerCAmelCase_ : List[str] = TFMobileBertForSequenceClassification(config=a_ )
lowerCAmelCase_ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : Any = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : List[str] , a_ : Optional[Any] , a_ : Dict , a_ : Dict , a_ : List[Any] , a_ : Union[str, Any] , a_ : int , a_ : Dict ):
lowerCAmelCase_ : Any = self.num_choices
lowerCAmelCase_ : Any = TFMobileBertForMultipleChoice(config=a_ )
lowerCAmelCase_ : Any = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : int = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : Tuple = tf.tile(tf.expand_dims(a_ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase_ : Tuple = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase_ : str = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self : Union[str, Any] , a_ : Union[str, Any] , a_ : Optional[int] , a_ : List[Any] , a_ : str , a_ : Optional[Any] , a_ : int , a_ : Union[str, Any] ):
lowerCAmelCase_ : Dict = self.num_labels
lowerCAmelCase_ : int = TFMobileBertForTokenClassification(config=a_ )
lowerCAmelCase_ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : Optional[Any] = model(a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self : Optional[int] , a_ : Any , a_ : Union[str, Any] , a_ : Dict , a_ : int , a_ : Tuple , a_ : Any , a_ : List[str] ):
lowerCAmelCase_ : Dict = TFMobileBertForQuestionAnswering(config=a_ )
lowerCAmelCase_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase_ : int = model(a_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) : Dict = config_and_inputs
lowerCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : str = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowerCAmelCase_ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def lowerCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : Optional[int] ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*a_ )
def lowerCamelCase ( self : Union[str, Any] ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*a_ )
def lowerCamelCase ( self : str ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*a_ )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*a_ )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*a_ )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*a_ )
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*a_ )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*a_ )
@slow
def lowerCamelCase ( self : Optional[int] ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
lowerCAmelCase_ : Optional[Any] = TFMobileBertModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_tf
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[str] = TFMobileBertForPreTraining.from_pretrained("google/mobilebert-uncased" )
lowerCAmelCase_ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ : Tuple = model(a_ )[0]
lowerCAmelCase_ : List[Any] = [1, 6, 3_05_22]
self.assertEqual(output.shape , a_ )
lowerCAmelCase_ : List[str] = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , a_ , atol=1e-4 )
| 161 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowercase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Tuple:
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCAmelCase_ : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
else:
lowerCAmelCase_ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
__UpperCamelCase , output_loading_info=__UpperCamelCase )
lowerCAmelCase_ : List[str] = ["key_proj", "value_proj", "query_proj"]
lowerCAmelCase_ : Tuple = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowerCAmelCase_ : Dict = key.split("." )
if attributes[0] == "lm_head":
lowerCAmelCase_ : int = prophet
lowerCAmelCase_ : int = prophet_old
else:
lowerCAmelCase_ : str = prophet.prophetnet
lowerCAmelCase_ : int = prophet_old.model
lowerCAmelCase_ : Optional[int] = False
for attribute in attributes:
if attribute in mapping:
lowerCAmelCase_ : Tuple = mapping[attribute]
if not hasattr(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0:
lowerCAmelCase_ : Optional[Any] = attribute
elif hasattr(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCAmelCase_ : str = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowerCAmelCase_ : Union[str, Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCAmelCase_ : Tuple = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowerCAmelCase_ : Optional[int] = True
break
elif attribute in special_keys and hasattr(__UpperCamelCase , "in_proj_weight" ):
lowerCAmelCase_ : List[Any] = old_model.in_proj_weight.shape[0] // 3
lowerCAmelCase_ : List[str] = getattr(__UpperCamelCase , __UpperCamelCase )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCAmelCase_ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCAmelCase_ : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCAmelCase_ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCAmelCase_ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCAmelCase_ : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCAmelCase_ : List[str] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCAmelCase_ : Any = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCAmelCase_ : int = True
break
if attribute.isdigit():
lowerCAmelCase_ : Tuple = model[int(__UpperCamelCase )]
lowerCAmelCase_ : Tuple = old_model[int(__UpperCamelCase )]
else:
lowerCAmelCase_ : Optional[int] = getattr(__UpperCamelCase , __UpperCamelCase )
if old_attribute == "":
lowerCAmelCase_ : Tuple = old_model
else:
if not hasattr(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowerCAmelCase_ : List[Any] = getattr(__UpperCamelCase , __UpperCamelCase )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
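# Example invocation (illustrative; the script name and output folder are
# placeholders, the flags are the ones defined above):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-converted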
| 161 | 1 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path, albert_config_file, pytorch_dump_path ):
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = AlbertForPreTraining(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict(), pytorch_dump_path )
if __name__ == "__main__":
__magic_name__: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--albert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ALBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__: Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
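# Example invocation (illustrative; all three paths are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base_pytorch_model.bin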
| 342 |
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers( remaining_length, remainder, digits, length ):
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1 ):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length )
        return result

    result = 0
    for digit_1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit_1

        if (remainder + digit_1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit_2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit_2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit_1 + digit_2) // 10, digits, length, )
    return result


def solution( max_power = 9 ):
    """simple docstring"""
    result = 0
    for length in range(1, max_power + 1 ):
        result += reversible_numbers(length, 0, [0] * length, length )
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
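    # Sanity checks (Project Euler 145): solution(3) == 120 -- the problem
    # statement says there are exactly 120 reversible numbers below one
    # thousand -- and the default solution() == solution(9) == 608720.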
| 342 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
"configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
"tokenization_cpmant": ["CpmAntTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
"CpmAntForCausalLM",
"CpmAntModel",
"CpmAntPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 |
__UpperCAmelCase = [
(10_00, "M"),
(9_00, "CM"),
(5_00, "D"),
(4_00, "CD"),
(1_00, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def roman_to_int( roman ):
    vals = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman( number ):
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number, arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
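# Round-trip sanity check for the two converters above (3749 decomposes as
# MMM + DCC + XL + IX):
if __name__ == "__main__":
    assert int_to_roman(3749) == "MMMDCCXLIX"
    assert roman_to_int("MMMDCCXLIX") == 3749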
| 257 | 0 |
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _snake_case ( A__ ):
_lowercase : Union[str, Any] = (KDPMaDiscreteScheduler,)
_lowercase : str = 10
def SCREAMING_SNAKE_CASE__ ( self , **a) -> int:
SCREAMING_SNAKE_CASE = {
'num_train_timesteps': 1100,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**a)
return config
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=a , beta_end=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Dict:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a)
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type='v_prediction')
SCREAMING_SNAKE_CASE = scheduler_class(**a)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(a)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(a , a)
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a)
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07) < 1E-2
assert abs(result_mean.item() - 0.00_02) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]:
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
scheduler.set_timesteps(self.num_inference_steps)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter * scheduler.init_noise_sigma
SCREAMING_SNAKE_CASE = sample.to(a)
for i, t in enumerate(scheduler.timesteps):
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(a , a)
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a)
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
if torch_device == "mps":
return
SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE = self.get_scheduler_config()
SCREAMING_SNAKE_CASE = scheduler_class(**a)
scheduler.set_timesteps(self.num_inference_steps , device=a)
SCREAMING_SNAKE_CASE = self.dummy_model()
SCREAMING_SNAKE_CASE = self.dummy_sample_deter.to(a) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE = scheduler.scale_model_input(a , a)
SCREAMING_SNAKE_CASE = model(a , a)
SCREAMING_SNAKE_CASE = scheduler.step(a , a , a)
SCREAMING_SNAKE_CASE = output.prev_sample
SCREAMING_SNAKE_CASE = torch.sum(torch.abs(a))
SCREAMING_SNAKE_CASE = torch.mean(torch.abs(a))
if str(a).startswith('cpu'):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.41_25) < 1E-2
assert abs(result_mean.item() - 0.02_66) < 1E-3
| 137 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : List[Any] = {'vocab_file': 'spiece.model'}
a_ : Dict = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
a_ : Tuple = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
a_ : int = 0
a_ : Optional[int] = 1
a_ : int = 2
a_ : Union[str, Any] = 3
a_ : List[str] = 4
class _snake_case ( A__ ):
_lowercase : List[str] = VOCAB_FILES_NAMES
_lowercase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Union[str, Any] = '''left'''
def __init__( self , a , a=False , a=True , a=False , a="<s>" , a="</s>" , a="<unk>" , a="<sep>" , a="<pad>" , a="<cls>" , a="<mask>" , a=["<eop>", "<eod>"] , a = None , **a , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE = AddedToken(a , lstrip=a , rstrip=a) if isinstance(a , a) else mask_token
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return len(self.sp_model)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Tuple:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , a) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
if self.remove_space:
SCREAMING_SNAKE_CASE = ' '.join(inputs.strip().split())
else:
SCREAMING_SNAKE_CASE = inputs
SCREAMING_SNAKE_CASE = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
SCREAMING_SNAKE_CASE = unicodedata.normalize('NFKD' , a)
SCREAMING_SNAKE_CASE = ''.join([c for c in outputs if not unicodedata.combining(a)])
if self.do_lower_case:
SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
SCREAMING_SNAKE_CASE = self.preprocess_text(a)
SCREAMING_SNAKE_CASE = self.sp_model.encode(a , out_type=a)
SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(a) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a)
else:
new_pieces.append(a)
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self , a) -> Dict:
return self.sp_model.PieceToId(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
return self.sp_model.IdToPiece(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> int:
SCREAMING_SNAKE_CASE = ''.join(a).replace(a , ' ').strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , a , a = False , a = None , a = True , **a , ) -> str:
SCREAMING_SNAKE_CASE = kwargs.pop('use_source_tokenizer' , a)
SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(a , skip_special_tokens=a)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a))
SCREAMING_SNAKE_CASE = []
sub_texts.append(a)
else:
current_sub_text.append(a)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE = ''.join(a)
SCREAMING_SNAKE_CASE = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE = self.clean_up_tokenization(a)
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is not None:
return ([0] * len(a)) + [1] + ([0] * len(a)) + [1, 1]
return ([0] * len(a)) + [1, 1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
if not os.path.isdir(a):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
SCREAMING_SNAKE_CASE = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a)
elif not os.path.isfile(self.vocab_file):
with open(a , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(a)
return (out_vocab_file,)
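# Usage note (illustrative, commented out because it needs a real sentencepiece
# model on disk; `_snake_case` is the obfuscated tokenizer class defined above):
#   tokenizer = _snake_case(vocab_file="spiece.model")
# Unlike BERT, XLNet appends its special tokens at the END of the sequence
# (tokens + [SEP] + [CLS], see build_inputs_with_special_tokens above) and
# pads on the left, as declared by the class's padding_side attribute.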
| 137 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowercase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case ( self ) -> str:
_UpperCAmelCase : List[Any] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
_UpperCAmelCase : Union[str, Any] = """The dog is cute and lives in the garden house"""
_UpperCAmelCase : int = jnp.array([tokenizer.encode(a_ )] )
_UpperCAmelCase : Any = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_UpperCAmelCase : Optional[int] = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_UpperCAmelCase : Optional[Any] = model(a_ )["""last_hidden_state"""]
self.assertEqual(output.shape ,a_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,a_ ,atol=1E-3 ) )
| 351 |
'''simple docstring'''
def snake_case_ ( lowerCAmelCase_ )-> int:
    '''simple docstring'''
    if not isinstance(lowerCAmelCase_ , int ):
        raise TypeError("""only integers accepted as input""" )
    else:
        num_string = str(abs(lowerCAmelCase_ ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
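# Worked example: for 123 the one-digit deletions are 23, 13 and 12, so the
# function returns 23; the sign is dropped via abs(), e.g. -123 also gives 23.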
| 349 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCAmelCase :List[str] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase :Union[str, Any] = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase :Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': 5_1_2,
}
lowerCAmelCase :Union[str, Any] = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class _lowerCamelCase ( __UpperCamelCase ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : Dict = PRETRAINED_INIT_CONFIGURATION
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : str = LxmertTokenizer
def __init__( self : List[Any] , _A : Optional[Any]=None , _A : Optional[Any]=None , _A : Any=True , _A : Tuple="[UNK]" , _A : str="[SEP]" , _A : Optional[Any]="[PAD]" , _A : List[str]="[CLS]" , _A : Tuple="[MASK]" , _A : Optional[Any]=True , _A : Any=None , **_A : Optional[int] , ) -> List[Any]:
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , tokenize_chinese_chars=a_ , strip_accents=a_ , **a_ , )
__magic_name__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a_ ) != do_lower_case
or normalizer_state.get('strip_accents' , a_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a_ ) != tokenize_chinese_chars
):
__magic_name__ : Optional[int] = getattr(a_ , normalizer_state.pop('type' ) )
__magic_name__ : Union[str, Any] = do_lower_case
__magic_name__ : Tuple = strip_accents
__magic_name__ : Any = tokenize_chinese_chars
__magic_name__ : List[str] = normalizer_class(**a_ )
__magic_name__ : str = do_lower_case
def __lowerCAmelCase ( self : Dict , _A : List[Any] , _A : Union[str, Any]=None ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = [self.sep_token_id]
__magic_name__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ) -> Optional[Any]:
__magic_name__ : Any = self._tokenizer.model.save(a_ , name=a_ )
return tuple(a_ ) | 331 |
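# Hedged usage sketch (network access assumed; `_lowerCamelCase` is the
# obfuscated fast-tokenizer class defined above, and from_pretrained is
# inherited from PreTrainedTokenizerFast):
#   tokenizer = _lowerCamelCase.from_pretrained("unc-nlp/lxmert-base-uncased")
#   tokenizer("a visual question")   # BERT-style inputs: [CLS] ... [SEP]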
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import (
SeqaSeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(
    data_dir,
    save_dir: str,
    model_name: str,
    bs: int = 8,
    max_source_length: int = 1024,
    type_path="val",
    n_obs=None,
    fp16=False,
    task="summarization",
    local_rank=None,
    num_return_sequences=1,
    dataset_kwargs: Dict = None,
    prefix="",
    **generate_kwargs,
):
    """Run evaluation on one GPU's shard of the data; save predictions to {save_dir}/rank_{rank}_output.json."""
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend="nccl", rank=local_rank)

    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(f"rank_{local_rank}_output.json")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop("num_beams", model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024,
        type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs,
    )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch["input_ids"].to(model.device),
            attention_mask=batch["attention_mask"].to(model.device),
            num_return_sequences=num_return_sequences, num_beams=num_beams,
            **generate_kwargs,
        )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch["ids"]
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({"pred": pred, "id": ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
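

# eval_data_dir is meant to run once per GPU under a distributed launcher. A sketch,
# assuming this file is saved as run_distributed_eval.py (the paths, GPU count and
# checkpoint below are illustrative only):
#
#     python -m torch.distributed.launch --nproc_per_node=2 run_distributed_eval.py \
#         --model_name sshleifer/distilbart-xsum-12-3 \
#         --save_dir tmp_gen --data_dir cnn_dm --bs 16 --fp16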
def run_generate():
    parser = argparse.ArgumentParser(
        epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate"
    )
    parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source")
    parser.add_argument(
        "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.",
        default="sshleifer/distilbart-xsum-12-3",
    )
    parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen")
    parser.add_argument("--max_source_length", type=int, default=None)
    parser.add_argument(
        "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch"
    )
    parser.add_argument(
        "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument(
        "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return"
    )
    parser.add_argument(
        "--sync_timeout", type=int, default=600, required=False,
        help="How long should master process wait for other processes to finish.",
    )
    parser.add_argument("--src_lang", type=str, default=None, required=False)
    parser.add_argument("--tgt_lang", type=str, default=None, required=False)
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--debug", action="store_true")
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(f"parsed the following generate kwargs: {generate_kwargs}")
    json_save_dir = Path(args.save_dir + "_tmp")
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob("rank_*.json"))
    if intermediate_files:
        raise ValueError(f"Found files at {json_save_dir} please move or remove them.")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs["src_lang"] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs["tgt_lang"] = args.tgt_lang

    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir,
        json_save_dir,
        args.model_name,
        type_path=args.type_path,
        bs=args.bs,
        fp16=args.fp16,
        task=args.task,
        local_rank=args.local_rank,
        n_obs=args.n_obs,
        max_source_length=args.max_source_length,
        num_return_sequences=args.num_return_sequences,
        prefix=args.prefix,
        dataset_kwargs=dataset_kwargs,
        **generate_kwargs,
    )

    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            pseudolabel_results = save_dir.joinpath("pseudolabel_results.json")
            print(f"Saving aggregated results at {pseudolabel_results}, intermediate in {json_save_dir}/")
            save_json(preds, pseudolabel_results)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target")
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]

        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = "translation" in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = "bleu" if calc_bleu else "rouge"
        metrics = score_fn(preds, labels)
        metrics["n_obs"] = len(preds)
        runtime = time.time() - start_time
        metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4)
        metrics["n_gpus"] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt"))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target"))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    """Concatenate the per-rank prediction lists, then sort them by example id."""
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x["pred"] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # wait until every rank's rank_*.json file appears under save_dir
    start_wait = time.time()
    logger.info("waiting for all nodes to finish")
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob("rank_*.json"))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        # the while/else branch only runs if the loop times out without returning
        raise TimeoutError("Rank 0 gave up on waiting for other processes")
    # Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
def ugly_numbers(n: int) -> int:
    """Return the nth ugly number (a positive integer whose only prime factors are 2, 3 and 5).

    >>> ugly_numbers(1)
    1
    >>> ugly_numbers(10)
    12
    """
    ugly_nums = [1]

    # one pointer per prime factor, plus the next candidate multiple for each
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
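

# Worked trace of the three-pointer merge for n = 5 (illustration only):
#   [1]           candidates (2, 3, 5) -> append 2, advance the *2 pointer
#   [1, 2]        candidates (4, 3, 5) -> append 3, advance the *3 pointer
#   [1, 2, 3]     candidates (4, 6, 5) -> append 4, advance the *2 pointer
#   [1, 2, 3, 4]  candidates (6, 6, 5) -> append 5, advance the *5 pointer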
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"{ugly_numbers(200) = }")
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient computation for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img) -> None:
    """Display a PIL image with the matplotlib axes hidden."""
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
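

# Example usage (a sketch; the tiny linear model below is illustrative only):
#
#     model = torch.nn.Linear(4, 2).to(get_device())
#     freeze_module(model)  # gradients will no longer flow into the model
#     assert all(not p.requires_grad for p in model.parameters())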