"""ERNIE-M model configuration."""
from __future__ import annotations

from typing import Dict

from ...configuration_utils import PretrainedConfig

ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "susnato/ernie-m-base_pytorch": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json",
    "susnato/ernie-m-large_pytorch": "https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json",
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
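# Added usage sketch (not part of the original file; the values checked are the
# defaults defined above, so this is illustrative only):
#
#     config = ErnieMConfig()
#     assert config.hidden_size == 768 and config.model_type == "ernie_m"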
import os
import tempfile
import unittest
import uuid
from pathlib import Path

from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available  # (sic: "availble" is the upstream spelling)

if is_torch_available():
    import torch

if is_soundfile_availble():
    import soundfile as sf

if is_vision_available():
    from PIL import Image


def get_new_path(suffix=""):
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)


@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)

        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)


@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)

        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)

        # A fresh temporary file is created for a PIL image, so the paths differ
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))


class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)

        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
"""simple docstring"""
from __future__ import annotations
def A__ ( A__ , A__ , A__ , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
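# Added worked example (illustrative): with hole_conc = 3 and intrinsic_conc = 6,
# the mass action law n * p = n_i**2 gives n = 6**2 / 3 = 12:
#
#     >>> carrier_concentration(electron_conc=0, hole_conc=3, intrinsic_conc=6)
#     ('electron_conc', 12.0)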
"""simple docstring"""
import requests
SCREAMING_SNAKE_CASE_ = '''''' # <-- Put your OpenWeatherMap appid here!
SCREAMING_SNAKE_CASE_ = '''https://api.openweathermap.org/data/2.5/'''
def A__ ( A__ = "Chicago" , A__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def A__ ( A__ = "Kolkata, India" , A__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def A__ ( A__ = 55.68 , A__ = 12.57 , A__ = APPID ) -> dict:
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE_ = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
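# Added design note: `params=locals()` works only because each function's
# parameter names (q, appid, lat, lon) match the query-string keys the
# OpenWeatherMap endpoints expect; renaming a parameter silently breaks the
# request. The explicit equivalent for current_weather would be (illustrative):
#
#     requests.get(URL_BASE + "weather", params={"q": q, "appid": appid}).json()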
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int32)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
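# Added minimal generation sketch (illustrative; model id and call pattern
# mirror the tests above, nothing here is new API):
#
#     tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
#     model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
#     ids = tokenizer("Today is a beautiful day and", return_tensors="tf").input_ids
#     print(tokenizer.batch_decode(model.generate(ids, max_length=10), skip_special_tokens=True))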
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal string representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically num //= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of a few sample decimal numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
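# Added note: Python's built-in oct() produces the same prefixed string
# directly, e.g. oct(216) == "0o330" == decimal_to_octal(216); the manual loop
# above exists for didactic purposes.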
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
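# Added usage sketch (illustrative; the size dict matches the tester defaults
# above, the blank test image is a placeholder):
#
#     from PIL import Image
#     from transformers import ViTImageProcessor
#
#     processor = ViTImageProcessor(size={"height": 18, "width": 18})
#     pixel_values = processor(Image.new("RGB", (32, 32)), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 18])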
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        batch_outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(batch_outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"
        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
batch_outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(batch_outputs , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
batch_outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(batch_outputs , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"
        object_detector = pipeline("object-detection", model=model_id)
        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
self.assertEqual(
nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993
        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
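# Added minimal pipeline sketch (illustrative; model id and call pattern are
# taken from the slow tests above):
#
#     from transformers import pipeline
#
#     detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#     for detection in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
#         print(detection["label"], detection["score"], detection["box"])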
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert

logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
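# Added example invocation (script filename assumed from the transformers
# naming convention; all paths are placeholders, not from the source):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base_pytorch_model.bin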
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_a : List[str] = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
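# Added launch example (illustrative; the flags come from the argparse
# definitions above, `accelerate launch` is the standard entry point, and the
# script filename is assumed):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 2 --mixed_precision fp16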
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def UpperCamelCase ( self ) -> Optional[int]:
if not self.test_rust_tokenizer:
return
snake_case = self.get_tokenizer()
snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
snake_case = '''lower newer'''
# Testing tokenization
snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Testing conversion to ids without special tokens
snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
self.assertListEqual(A__ , A__ )
# Testing conversion to ids with special tokens
snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
snake_case = rust_tokenizer.encode(A__ )
self.assertListEqual(A__ , A__ )
# Testing the unknown token
snake_case = tokens + [rust_tokenizer.unk_token]
snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase ( self , A__=15 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Simple input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
# Pair input
self.assertRaises(
A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
def UpperCamelCase ( self ) -> Tuple:
snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case = ('''This is a simple input''', '''This is a pair''')
snake_case = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case = tokenizer.pad_token_id
snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def UpperCamelCase ( self ) -> str:
snake_case = '''$$$'''
snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
snake_case = '''This is a simple input'''
snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case = tokenizer.bos_token_id
snake_case = tokenizer(A__ )
snake_case = tokenizer(A__ )
self.assertEqual(out_s.input_ids[0] , A__ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case = tokenizer.decode(out_s.input_ids )
snake_case = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , A__ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def UpperCamelCase ( self ) -> Any:
snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
snake_case = tokenizer.encode(A__ )
snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
self.assertEqual(A__ , A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
pass
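# Added usage sketch for truncate_before_pattern (illustrative; mirrors the
# slow test above, which exercises the same decode kwarg):
#
#     tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer.encode("if len_a > len_b:\n    result = a\n\n\n\n# comment")
#     print(tokenizer.decode(ids, truncate_before_pattern=["\n\n\n", "^#"]))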
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __UpperCamelCase ( a : Optional[int] ) ->Dict:
snake_case = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(a , a )
def __UpperCamelCase ( a : Optional[Any] ) ->int:
snake_case = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
snake_case = s_dict.pop(a )
elif "subsample" in key:
snake_case = s_dict.pop(a )
def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]:
snake_case , snake_case = emb.weight.shape
snake_case = nn.Linear(a , a , bias=a )
snake_case = emb.weight.data
return lin_layer
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
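# Example invocation (sketch; the script name and both paths are placeholders for a
# locally available fairseq Speech2Text checkpoint and an output directory):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t_converted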
| 44 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Loads the dataset metadata from the YAML block of a dataset card (README.md)."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Loads the dataset metadata from a YAML string."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}

        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
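# Programmatic round-trip sketch (illustrative; mirrors what the CLI entry point above
# does for a README.md, but starting from a raw YAML string):
#
#   metadata = DatasetMetadata.from_yaml_string("language:\n- en\nlicense: mit\n")
#   assert metadata["language"] == ["en"]
#   print(metadata.to_yaml_string())  # serializes the mapping back into a YAML block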
| 304 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : Optional[Any]=0 , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : Dict = dict(self.forward_default_kwargs )
_snake_case : Tuple = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : List[Any] = self.dummy_sample
_snake_case : Optional[int] = 0.1 * sample
_snake_case : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : Optional[Any] = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
_snake_case : Optional[int] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Any = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : int , lowerCamelCase_ : int=0 , **lowerCamelCase_ : Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = dict(self.forward_default_kwargs )
_snake_case : Optional[int] = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : Any = self.dummy_sample
_snake_case : Union[str, Any] = 0.1 * sample
_snake_case : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Any = self.get_scheduler_config()
_snake_case : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : List[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when the caller did not supply one; the previous
        # unconditional re-creation silently discarded a passed-in scheduler.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Dict = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
_snake_case : List[str] = self.get_scheduler_config()
_snake_case : int = scheduler_class(**lowerCamelCase_ )
_snake_case : Union[str, Any] = self.dummy_sample
_snake_case : Union[str, Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ):
_snake_case : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Any = scheduler.timesteps[5]
_snake_case : List[str] = scheduler.timesteps[6]
_snake_case : List[str] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[int] ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type='deis' , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
_snake_case : str = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 )
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
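# Minimal denoising-loop sketch (illustrative, not part of the test suite): the same
# set_timesteps/step pattern the `full_loop` helper above exercises, with random tensors
# standing in for a real diffusion model's noise prediction.
if __name__ == "__main__":
    scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.randn(1, 3, 8, 8)  # a real model would predict this residual
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)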
| 304 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
def snake_case__ ( self) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : str = BigBirdTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_)
_UpperCAmelCase : int = tokenizer.tokenize('''This is a test''')
self.assertListEqual(UpperCAmelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [285, 46, 10, 170, 382] , )
_UpperCAmelCase : int = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_UpperCAmelCase : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
self.assertListEqual(
UpperCAmelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
_UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(UpperCAmelCase_)
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def snake_case__ ( self) -> Any:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
@slow
def snake_case__ ( self) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = '''Hello World!'''
_UpperCAmelCase : Dict = [65, 18536, 2260, 101, 66]
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_))
@slow
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : str = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
# fmt: off
_UpperCAmelCase : Any = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_))
@require_torch
@slow
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_UpperCAmelCase : Union[str, Any] = list(self.big_tokenizer.get_vocab().keys())[:10]
_UpperCAmelCase : Optional[Any] = ''' '''.join(UpperCAmelCase_)
_UpperCAmelCase : Tuple = self.big_tokenizer.encode_plus(UpperCAmelCase_ , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase_)
_UpperCAmelCase : Optional[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=UpperCAmelCase_)
_UpperCAmelCase : Optional[Any] = BigBirdConfig(attention_type='''original_full''')
_UpperCAmelCase : Any = BigBirdModel(UpperCAmelCase_)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCAmelCase_)
model(**UpperCAmelCase_)
@slow
def snake_case__ ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : int = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
_UpperCAmelCase : Optional[Any] = tokenizer.decode(tokenizer('''Paris is the [MASK].''').input_ids)
self.assertTrue(decoded_text == '''[CLS] Paris is the[MASK].[SEP]''')
@slow
def snake_case__ ( self) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = {'''input_ids''': [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='''google/bigbird-roberta-base''' , revision='''215c99f1600e06f83acce68422f2035b2b5c3510''' , )
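# Quick usage sketch (illustrative; assumes network access to the
# "google/bigbird-roberta-base" checkpoint exercised by the slow tests above):
if __name__ == "__main__":
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    encoded = tokenizer("Paris is the [MASK].")
    print(encoded.input_ids)
    print(tokenizer.decode(encoded.input_ids))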
| 711 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert the tokenizer's output matches hard-coded reference encodings."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)
def snake_case__ ( self , _A=6) -> Optional[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
_UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(_A , **_A)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase : List[Any] = '''This is a simple input'''
_UpperCAmelCase : Any = ['''This is a simple input 1''', '''This is a simple input 2''']
_UpperCAmelCase : int = ('''This is a simple input''', '''This is a pair''')
_UpperCAmelCase : Optional[int] = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
try:
tokenizer_r.encode(_A , max_length=_A)
tokenizer_r.encode_plus(_A , max_length=_A)
tokenizer_r.batch_encode_plus(_A , max_length=_A)
tokenizer_r.encode(_A , max_length=_A)
tokenizer_r.batch_encode_plus(_A , max_length=_A)
except ValueError:
self.fail('''Bloom Tokenizer should be able to deal with padding''')
_UpperCAmelCase : Tuple = None # Hotfixing padding = None
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''')
# Simple input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''')
# Simple input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
# Pair input
self.assertRaises(_A , tokenizer_r.encode , _A , max_length=_A , padding='''max_length''')
# Pair input
self.assertRaises(_A , tokenizer_r.encode_plus , _A , max_length=_A , padding='''max_length''')
# Pair input
self.assertRaises(
_A , tokenizer_r.batch_encode_plus , _A , max_length=_A , padding='''max_length''' , )
def snake_case__ ( self) -> Any:
"""simple docstring"""
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : int = load_dataset('''xnli''' , '''all_languages''' , split='''test''' , streaming=_A)
_UpperCAmelCase : Tuple = next(iter(_A))['''premise'''] # pick up one data
_UpperCAmelCase : List[Any] = list(sample_data.values())
_UpperCAmelCase : Any = list(map(tokenizer.encode , _A))
_UpperCAmelCase : List[str] = [tokenizer.decode(_A , clean_up_tokenization_spaces=_A) for x in output_tokens]
self.assertListEqual(_A , _A)
def snake_case__ ( self) -> Optional[Any]:
"""simple docstring"""
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
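# Usage sketch (illustrative; assumes network access to the "bigscience/tokenizer"
# checkpoint that `setUp` downloads above):
if __name__ == "__main__":
    tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
    batch = tokenizer(["The quick brown fox</s>", "jumps over the lazy dog</s>"])
    print(batch.input_ids)
    print(tokenizer.batch_decode(batch.input_ids))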
| 186 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    scheduler: VQDiffusionScheduler
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
def A__ ( self :Optional[int] , __snake_case :Optional[int] , __snake_case :Optional[Any] , __snake_case :Dict ):
'''simple docstring'''
__magic_name__ : List[str] =len(__snake_case ) if isinstance(__snake_case , __snake_case ) else 1
# get prompt text embeddings
__magic_name__ : str =self.tokenizer(
__snake_case , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__magic_name__ : str =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__magic_name__ : int =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__magic_name__ : List[Any] =text_input_ids[:, : self.tokenizer.model_max_length]
__magic_name__ : Union[str, Any] =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__magic_name__ : Optional[Any] =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate text embeddings for each generation per prompt
__magic_name__ : Any =prompt_embeds.repeat_interleave(__snake_case , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__magic_name__ : Union[str, Any] =self.learned_classifier_free_sampling_embeddings.embeddings
__magic_name__ : Optional[Any] =negative_prompt_embeds.unsqueeze(0 ).repeat(__snake_case , 1 , 1 )
else:
__magic_name__ : Any =[""""""] * batch_size
__magic_name__ : List[str] =text_input_ids.shape[-1]
__magic_name__ : Tuple =self.tokenizer(
__snake_case , padding="""max_length""" , max_length=__snake_case , truncation=__snake_case , return_tensors="""pt""" , )
__magic_name__ : List[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__magic_name__ : Any =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : Any =negative_prompt_embeds.shape[1]
__magic_name__ : str =negative_prompt_embeds.repeat(1 , __snake_case , 1 )
__magic_name__ : Any =negative_prompt_embeds.view(batch_size * num_images_per_prompt , __snake_case , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : Optional[int] =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self :Optional[Any] , __snake_case :Union[str, List[str]] , __snake_case :int = 1_00 , __snake_case :float = 5.0 , __snake_case :float = 1.0 , __snake_case :int = 1 , __snake_case :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __snake_case :Optional[torch.FloatTensor] = None , __snake_case :Optional[str] = "pil" , __snake_case :bool = True , __snake_case :Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __snake_case :int = 1 , ):
'''simple docstring'''
if isinstance(__snake_case , __snake_case ):
__magic_name__ : str =1
elif isinstance(__snake_case , __snake_case ):
__magic_name__ : List[str] =len(__snake_case )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__snake_case )}" )
__magic_name__ : List[str] =batch_size * num_images_per_prompt
__magic_name__ : Dict =guidance_scale > 1.0
__magic_name__ : Union[str, Any] =self._encode_prompt(__snake_case , __snake_case , __snake_case )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__snake_case , __snake_case ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__snake_case )}." )
# get the initial completely masked latents unless the user supplied it
__magic_name__ : List[Any] =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
__magic_name__ : List[Any] =self.transformer.num_vector_embeds - 1
__magic_name__ : Union[str, Any] =torch.full(__snake_case , __snake_case ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
__magic_name__ : Dict =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__snake_case , device=self.device )
__magic_name__ : Optional[Any] =self.scheduler.timesteps.to(self.device )
__magic_name__ : List[Any] =latents
for i, t in enumerate(self.progress_bar(__snake_case ) ):
# expand the sample if we are doing classifier free guidance
__magic_name__ : Dict =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__magic_name__ : List[str] =self.transformer(__snake_case , encoder_hidden_states=__snake_case , timestep=__snake_case ).sample
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : List[str] =model_output.chunk(2 )
__magic_name__ : List[Any] =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__snake_case , dim=1 , keepdim=__snake_case )
__magic_name__ : Any =self.truncate(__snake_case , __snake_case )
# remove `log(0)`'s (`-inf`s)
__magic_name__ : Union[str, Any] =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : Union[str, Any] =self.scheduler.step(__snake_case , timestep=__snake_case , sample=__snake_case , generator=__snake_case ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__snake_case , __snake_case , __snake_case )
__magic_name__ : Tuple =self.vqvae.config.vq_embed_dim
__magic_name__ : str =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__magic_name__ : Optional[int] =self.vqvae.quantize.get_codebook_entry(__snake_case , shape=__snake_case )
__magic_name__ : Any =self.vqvae.decode(__snake_case , force_not_quantize=__snake_case ).sample
__magic_name__ : Any =(image / 2 + 0.5).clamp(0 , 1 )
__magic_name__ : Optional[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__magic_name__ : Union[str, Any] =self.numpy_to_pil(__snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__snake_case )
    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncates `log_p_x_0` such that for each column vector the total cumulative probability is
        `truncation_rate`. The lowest probabilities that would increase the cumulative probability
        above `truncation_rate` are set to zero.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
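# End-to-end usage sketch (illustrative; the checkpoint name is an assumption and the
# snippet is not part of this module):
#
#   from diffusers import VQDiffusionPipeline
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", num_inference_steps=50).images[0]
#   image.save("teddy_bear.png")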
| 21 |
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
def __snake_case ( self : List[str], **_snake_case : Tuple ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname, **_snake_case )
def __snake_case ( self : Any, _snake_case : Dict ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : Optional[int] ='''</s>'''
snake_case : int =0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case ), _snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case ), _snake_case )
def __snake_case ( self : Any ):
'''simple docstring'''
snake_case : Tuple =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''</s>''' )
self.assertEqual(vocab_keys[1], '''<unk>''' )
self.assertEqual(vocab_keys[-1], '''<pad>''' )
self.assertEqual(len(_snake_case ), 9 )
def __snake_case ( self : str ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 9 )
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
def __snake_case ( self : Optional[Any] ):
'''simple docstring'''
snake_case : Any =self.get_tokenizer()
snake_case : int =tok(
['''I am a small frog''' * 1_000, '''I am a small frog'''], padding=_snake_case, truncation=_snake_case, return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
self.assertEqual(batch.input_ids.shape, (2, 512) )
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : List[str] =self.get_tokenizer()
snake_case : int =tok(['''I am a tiny frog''', '''I am a small frog'''], padding=_snake_case, return_tensors=_snake_case )
self.assertIsInstance(_snake_case, _snake_case )
self.assertEqual(batch_smaller.input_ids.shape, (2, 10) )
@slow
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
snake_case : List[Any] ={'''input_ids''': [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case, model_name='''Helsinki-NLP/opus-mt-en-de''', revision='''1a8c2263da11e68e50938f97e10cd57820bd504c''', decode_kwargs={'''use_source_tokenizer''': True}, )
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
snake_case : Optional[Any] =MarianTokenizer.from_pretrained('''hf-internal-testing/test-marian-two-vocabs''' )
snake_case : List[str] ='''Tämä on testi'''
snake_case : Optional[int] ='''This is a test'''
snake_case : Optional[Any] =[76, 7, 2_047, 2]
snake_case : int =[69, 12, 11, 940, 2]
snake_case : Optional[int] =tokenizer(_snake_case ).input_ids
self.assertListEqual(_snake_case, _snake_case )
snake_case : Optional[Any] =tokenizer(text_target=_snake_case ).input_ids
self.assertListEqual(_snake_case, _snake_case )
snake_case : Optional[int] =tokenizer.decode(_snake_case, skip_special_tokens=_snake_case )
self.assertEqual(_snake_case, _snake_case )
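# Usage sketch (illustrative; assumes network access to the "Helsinki-NLP/opus-mt-en-de"
# checkpoint used by the integration tests above):
if __name__ == "__main__":
    tokenizer = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    batch = tokenizer(["I am a small frog"], return_tensors=None)
    print(batch.input_ids)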
| 349 | 0 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
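# Worked example (illustrative): for a silicon p-n junction at T = 300 K with
# n_D = 1e17 cm^-3, n_A = 1e16 cm^-3 and n_i ≈ 1e10 cm^-3, the built-in potential is
# V_bi = (kT/q) * ln(n_D * n_A / n_i^2) ≈ 0.0259 V * ln(1e13) ≈ 0.77 V:
#
#   print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e16, intrinsic_conc=1e10))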
| 713 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __A( unittest.TestCase ):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
def lowercase__ ( self : Union[str, Any] ):
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4]]
lowerCamelCase_ = DisjunctiveConstraint(__UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
lowerCamelCase_ = stepped is True and completed is False and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(3 )
lowerCamelCase_ = stepped is True and completed is True and reset is False
self.assertTrue(__UpperCamelCase )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowercase__ ( self : Tuple ):
lowerCamelCase_ = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
lowerCamelCase_ = DisjunctiveConstraint(__UpperCamelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
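# Usage sketch (illustrative, not part of the test suite): a DisjunctiveConstraint is
# fulfilled once generation has produced any one of its candidate token sequences, as the
# progression tests above demonstrate step by step.
if __name__ == "__main__":
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token)
    print(dc.completed, dc.current_seq)  # True [1, 2, 4]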
| 103 | 0 |
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """
        Loads an ONNX Inference session with a given provider.
        """
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
def __a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Path] , SCREAMING_SNAKE_CASE__ : Optional[str] = None , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
__a = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__a = self.model_save_dir.joinpath(self.latest_model_name )
__a = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ )
try:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__a = self.model_save_dir.joinpath(SCREAMING_SNAKE_CASE__ )
if src_path.exists():
__a = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ )
try:
shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
except shutil.SameFileError:
pass
def __a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
logger.error(f'''Provided path ({save_directory}) should be a directory, not a file''' )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
# saving model weights/files
self._save_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
def __a ( cls : int , SCREAMING_SNAKE_CASE__ : Union[str, Path] , SCREAMING_SNAKE_CASE__ : Optional[Union[bool, str, None]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[str, None]] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional["ort.SessionOptions"] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
'''simple docstring'''
__a = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(SCREAMING_SNAKE_CASE__ ):
__a = OnnxRuntimeModel.load_model(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ )
__a = Path(SCREAMING_SNAKE_CASE__ )
# load model from hub
else:
# download model
__a = hf_hub_download(
repo_id=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , )
__a = Path(SCREAMING_SNAKE_CASE__ ).parent
__a = Path(SCREAMING_SNAKE_CASE__ ).name
__a = OnnxRuntimeModel.load_model(SCREAMING_SNAKE_CASE__ , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ )
return cls(model=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
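# Hedged usage sketch (not part of the original class). It assumes a local folder
# containing "model.onnx"; the input name "sample" is hypothetical and depends on
# the exported graph:
#
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))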
| 582 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    # also known as SiLU (sigmoid linear unit)
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
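# A quick numeric sanity check of the two activations above. This is an
# illustrative sketch, not part of the original module; "swish" is the label
# this rewrite gives to the vector * sigmoid(vector) form, and the expected
# values are rounded to four decimals.
if __name__ == "__main__":
    example = np.array([-1.0, 0.0, 1.0])
    print(sigmoid(example))  # ~ [0.2689 0.5    0.7311]
    print(swish(example))    # ~ [-0.2689  0.      0.7311]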
| 582 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            _, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCamelCase__ ( a : str , a : List[Any] ) -> Tuple:
"""simple docstring"""
a__ :Any = pa.BufferOutputStream()
a__ :Any = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write({"col_1": "foo", "col_2": 1} )
writer.write({"col_1": "bar", "col_2": 2} )
a__ :str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ :Union[str, Any] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
def lowerCamelCase__ ( a : Dict ) -> Optional[int]:
"""simple docstring"""
a__ :Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
with pytest.raises(a ):
writer.write({"col_1": "foo", "col_2": 1} , key=[1, 2] )
a__ :Any = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowerCamelCase__ ( a : Dict ) -> List[Any]:
"""simple docstring"""
a__ :int = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
with pytest.raises(a ):
writer.write({"col_1": "foo", "col_2": 1} , key=10 )
writer.write({"col_1": "bar", "col_2": 2} , key=10 )
a__ :List[Any] = writer.finalize()
@pytest.mark.parametrize("writer_batch_size" , [None, 2, 10] )
def lowerCamelCase__ ( a : int ) -> Dict:
"""simple docstring"""
a__ :Dict = pa.BufferOutputStream()
with ArrowWriter(
stream=a , writer_batch_size=a , hash_salt="split_name" , check_duplicates=a , ) as writer:
writer.write({"col_1": "foo", "col_2": 1} , key=1 )
writer.write({"col_1": "bar", "col_2": 2} , key=2 )
a__ :Optional[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCamelCase__ ( a : Tuple , a : int ) -> List[str]:
"""simple docstring"""
a__ :str = pa.BufferOutputStream()
a__ :Any = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]} )
writer.write_batch({"col_1": [], "col_2": []} )
a__ :Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ :List[str] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCamelCase__ ( a : List[str] , a : str ) -> List[str]:
"""simple docstring"""
a__ :str = pa.BufferOutputStream()
a__ :str = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]} ) )
a__ :Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ :Optional[int] = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("writer_batch_size" , [None, 1, 10] )
@pytest.mark.parametrize(
"fields" , [None, {"col_1": pa.string(), "col_2": pa.intaa()}, {"col_1": pa.string(), "col_2": pa.intaa()}] )
def lowerCamelCase__ ( a : int , a : Tuple ) -> List[Any]:
"""simple docstring"""
a__ :List[str] = pa.BufferOutputStream()
a__ :Dict = pa.schema(a ) if fields else None
with ArrowWriter(stream=a , schema=a , writer_batch_size=a ) as writer:
writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]} ) )
writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]} ) )
a__ :Tuple = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
a__ :Any = {"col_1": pa.string(), "col_2": pa.intaa()}
assert writer._schema == pa.schema(a , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype" , [(None, pa.intaa()), (Value("int32" ), pa.intaa())] )
@pytest.mark.parametrize("sequence" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase__ ( a : Optional[Any] , a : str , a : Tuple ) -> Tuple:
"""simple docstring"""
a__ :Dict = pa.array(TypedSequence(a , optimized_int_type=a ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception" , [False, True] )
def lowerCamelCase__ ( a : Tuple , a : Tuple ) -> str:
"""simple docstring"""
a__ :List[Any] = str(tmp_path / "dataset-train.arrow" )
try:
with ArrowWriter(path=a ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    buf_reader = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(buf_reader)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
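# Minimal standalone sketch of the ArrowWriter round trip exercised by the tests
# above (assumes only `datasets` and `pyarrow` are installed):
#
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       num_examples, num_bytes = writer.finalize()
#   table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()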
| 711 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
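# Illustrative sketch of the in_proj split above: PyTorch's MultiheadAttention
# stores q/k/v as one (3*d, d) matrix, which gets sliced into three (d, d)
# blocks. Toy example with d = 256 (hypothetical tensors, not checkpoint data):
#
#   in_proj_weight = torch.randn(3 * 256, 256)
#   q_w = in_proj_weight[:256, :]
#   k_w = in_proj_weight[256:512, :]
#   v_w = in_proj_weight[-256:, :]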
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
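# Hedged usage example (the script filename is an assumption; adjust to the
# actual file name in your checkout):
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet-50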
| 373 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
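# Illustrative usage of the timing decorator above, a sketch that is not part
# of the original benchmark module. The decorated function returns the elapsed
# time in seconds instead of its own result:
@get_duration
def _example_workload():
    return sum(i * i for i in range(10_000))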
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
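# Illustrative call of generate_examples above; the feature spec is made up for
# the sketch:
#
#   features = datasets.Features({"text": datasets.Value("string")})
#   rows = generate_examples(features, num_examples=2)
#   # -> [(0, {"text": ...}), (1, {"text": ...})]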
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if num_final_examples != num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
 | 32 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config
    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3
    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
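# The tests above all follow the same three-step denoising pattern. A distilled
# sketch (assumes `scheduler`, `model`, `sample`, and `generator` are set up as
# in the tests):
#
#   for t in scheduler.timesteps:
#       scaled = scheduler.scale_model_input(sample, t)
#       residual = model(scaled, t)
#       sample = scheduler.step(residual, t, sample, generator=generator).prev_sample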
| 669 | 0 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10)) | 708 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
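# Worked examples for electrical_impedance above, using a 3-4-5 right triangle
# of values so the numbers are exact:
#   electrical_impedance(3, 4, 0) -> {"impedance": 5.0}
#   electrical_impedance(0, 4, 5) -> {"resistance": 3.0}
#   electrical_impedance(3, 0, 5) -> {"reactance": 4.0}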
if __name__ == "__main__":
import doctest
doctest.testmod() | 382 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 51 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35_378, 6_661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase : Optional[Any] = {'''input_ids''': [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
"""simple docstring"""
def merge_sort(collection: list):
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
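# Example: merge_sort([5, 2, 4, 1]) returns [1, 2, 4, 5].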
| 711 |
"""simple docstring"""
def factorial(num: int):
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int):
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100):
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
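# Worked example for the pipeline above: factorial(10) == 3_628_800 and
# split_and_add(3_628_800) == 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) == 27.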
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 500 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
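# Hedged usage example (the script filename is an assumption):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation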
| 244 | '''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__SCREAMING_SNAKE_CASE : Optional[int] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
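# Both cache checks above decode the prefix once to populate past_key_values and
# then feed only the final token; a correct autoregressive cache must reproduce
# the last-position logits of a full, uncached decode to within 1e-3.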
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
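# For example, with pad_token_id = 1 an encoder row [5, 7, 1, 1] yields the
# attention mask [1, 1, 0, 0], while the decoder mask always keeps position 0
# so a pad token used as decoder_start_token_id is still attended to.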
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 244 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

mock_tokenizer_config = {"target_lang": "fi", "source_lang": "en"}
zh_code = ">>zh<<"
ORG_NAME = "Helsinki-NLP/"

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
@require_sentencepiece
class MarianTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MarianTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab"])
        save_json(mock_tokenizer_config, save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"])
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["source_spm"])
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["target_spm"])

        tokenizer = MarianTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs) -> MarianTokenizer:
        return MarianTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 9)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 9)
    def test_tokenizer_equivalence_en_de(self):
        en_de_tokenizer = MarianTokenizer.from_pretrained(f"{ORG_NAME}opus-mt-en-de")
        batch = en_de_tokenizer(["I am a small frog"], return_tensors=None)
        self.assertIsInstance(batch, BatchEncoding)
        expected = [38, 121, 14, 697, 38_848, 0]
        self.assertListEqual(expected, batch.input_ids[0])

        save_dir = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(save_dir)
        contents = [x.name for x in Path(save_dir).glob("*")]
        self.assertIn("source.spm", contents)
        MarianTokenizer.from_pretrained(save_dir)
    def test_outputs_not_longer_than_maxlen(self):
        tok = self.get_tokenizer()

        batch = tok(
            ["I am a small frog" * 1_000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual(batch.input_ids.shape, (2, 512))

    def test_outputs_can_be_shorter(self):
        tok = self.get_tokenizer()
        batch_smaller = tok(["I am a tiny frog", "I am a small frog"], padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch_smaller, BatchEncoding)
        self.assertEqual(batch_smaller.input_ids.shape, (2, 10))
@slow
def _A ( self : str ):
# fmt: off
UpperCamelCase :List[Any] = {"""input_ids""": [[43_495, 462, 20, 42_164, 1_369, 52, 464, 132, 1_703, 492, 13, 7_491, 38_999, 6, 8, 464, 132, 1_703, 492, 13, 4_669, 37_867, 13, 7_525, 27, 1_593, 988, 13, 33_972, 7_029, 6, 20, 8_251, 383, 2, 270, 5_866, 3_788, 2, 2_353, 8_251, 12_338, 2, 13_958, 387, 2, 3_629, 6_953, 188, 2_900, 2, 13_958, 8_011, 11_501, 23, 8_460, 4_073, 34_009, 20, 435, 11_439, 27, 8, 8_460, 4_073, 6_004, 20, 9_988, 375, 27, 33, 266, 1_945, 1_076, 1_350, 37_867, 3_288, 5, 577, 1_076, 4_374, 8, 5_082, 5, 26_453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10_767, 6, 316, 304, 4_239, 3, 0], [148, 15_722, 19, 1_839, 12, 1_350, 13, 22_327, 5_082, 5_418, 47_567, 35_938, 59, 318, 19_552, 108, 2_183, 54, 14_976, 4_835, 32, 547, 1_114, 8, 315, 2_417, 5, 92, 19_088, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100], [36, 6_395, 12_570, 39_147, 11_597, 6, 266, 4, 45_405, 7_296, 3, 0, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100, 58_100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def test_tokenizer_integration_seperate_vocabs(self):
        tokenizer = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs")

        source_text = "Tämä on testi"
        target_text = "This is a test"

        expected_src_ids = [76, 7, 2_047, 2]
        expected_target_ids = [69, 12, 11, 940, 2]

        src_ids = tokenizer(source_text).input_ids
        self.assertListEqual(expected_src_ids, src_ids)

        target_ids = tokenizer(text_target=target_text).input_ids
        self.assertListEqual(expected_target_ids, target_ids)

        decoded = tokenizer.decode(target_ids, skip_special_tokens=True)
        self.assertEqual(decoded, target_text)
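    # The split-vocab checkpoint above exercises MarianTokenizer's separate
    # source and target SentencePiece models: text_target=... routes through
    # the target vocabulary, which is why source and target sides of the pair
    # map to different id sequences.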
| 590 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 10_24,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang,
            lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self._additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
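    # e.g. ["▁hello", "▁world", "</s>"]: the plain pieces run through the
    # SentencePiece decoder (optionally upper-cased), while "</s>" is appended
    # verbatim so special tokens never reach the subword model.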
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
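# Round-trip sketch for the JSON helpers above (the path is illustrative):
#     save_json({"<s>": 0, "</s>": 2}, "vocab.json")
#     assert load_json("vocab.json") == {"<s>": 0, "</s>": 2}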
| 590 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
config_args.add_argument(
'--config_file' , type=__UpperCamelCase , default=__UpperCamelCase , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=__UpperCamelCase , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=__UpperCamelCase , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=__UpperCamelCase , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone

    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
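    # With e.g. --tpu_name my-tpu --tpu_zone us-central2-b --command "echo hi"
    # (all illustrative values), the assembled invocation is roughly:
    #   gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b \
    #     --command "cd /usr/share; echo hi" --worker all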
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
print('Successfully setup pod.' )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
| 9 |
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
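    # Hedged demo (made-up numbers): 230 V at 0 degrees and 10 A at -35 degrees,
    # an inductive load, gives S = P + jQ with a positive real (active) part.
    print(apparent_power(230, 10, 0, -35))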
| 368 | 0 |
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
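# Hedged usage sketch (requires a CUDA toolchain; the call below follows the
# deformable-DETR kernel API, but treat the exact signature as an assumption):
#     MSDA = load_cuda_kernels()
#     out = MSDA.ms_deform_attn_forward(
#         value, spatial_shapes, level_start_index,
#         sampling_locations, attention_weights, im2col_step)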
| 170 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
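# Minimal end-to-end sketch (dataset contents and file name are illustrative):
#     from datasets import Dataset, Image
#     ds = Dataset.from_dict({"image": ["cat.png"]}).cast_column("image", Image())
#     pil_img = ds[0]["image"]  # decoded lazily via Image.decode_example()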
| 170 | 1 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, LORA_PREFIX_UNET, LORA_PREFIX_TEXT_ENCODER, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []
# directly update weight in diffusers model
for key in state_dict:
# it is suggested to print out the key, it usually will be something like below
# "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
# as we have set the alpha beforehand, so just skip
if ".alpha" in key or key in visited:
continue
if "text" in key:
_UpperCAmelCase = key.split('''.''' )[0].split(LORA_PREFIX_TEXT_ENCODER + '''_''' )[-1].split('''_''' )
_UpperCAmelCase = pipeline.text_encoder
else:
_UpperCAmelCase = key.split('''.''' )[0].split(LORA_PREFIX_UNET + '''_''' )[-1].split('''_''' )
_UpperCAmelCase = pipeline.unet
# find the target layer
_UpperCAmelCase = layer_infos.pop(0 )
while len(SCREAMING_SNAKE_CASE_ ) > -1:
try:
_UpperCAmelCase = curr_layer.__getattr__(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
_UpperCAmelCase = layer_infos.pop(0 )
elif len(SCREAMING_SNAKE_CASE_ ) == 0:
break
except Exception:
if len(SCREAMING_SNAKE_CASE_ ) > 0:
temp_name += "_" + layer_infos.pop(0 )
else:
_UpperCAmelCase = layer_infos.pop(0 )
_UpperCAmelCase = []
if "lora_down" in key:
pair_keys.append(key.replace('''lora_down''' , '''lora_up''' ) )
pair_keys.append(SCREAMING_SNAKE_CASE_ )
else:
pair_keys.append(SCREAMING_SNAKE_CASE_ )
pair_keys.append(key.replace('''lora_up''' , '''lora_down''' ) )
# update weight
if len(state_dict[pair_keys[0]].shape ) == 4:
_UpperCAmelCase = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
_UpperCAmelCase = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).unsqueeze(2 ).unsqueeze(3 )
else:
_UpperCAmelCase = state_dict[pair_keys[0]].to(torch.floataa )
_UpperCAmelCase = state_dict[pair_keys[1]].to(torch.floataa )
curr_layer.weight.data += alpha * torch.mm(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# update visited list
for item in pair_keys:
visited.append(SCREAMING_SNAKE_CASE_ )
return pipeline
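# The merge performed above is the standard LoRA update W <- W0 + alpha * (B @ A),
# with B the "lora_up" and A the "lora_down" matrix; 1x1 conv kernels are
# squeezed down to 2-D first so the same matrix product applies.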
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
)
parser.add_argument(
"--lora_prefix_text_encoder",
default="lora_te",
type=str,
help="The prefix of text encoder weight in safetensors",
)
parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
parser.add_argument(
"--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
)
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate the polynomial term by term, recomputing x**i for each term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
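    # Sanity check: both evaluation orders must agree; Horner's rule just
    # reassociates the sum as (((a_n*x + a_{n-1})*x + ...)*x + a_0).
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6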
| 17 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
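    # e.g. ["▁This", "▁is", "▁a", "▁test"] -> "This is a test": the
    # SentencePiece word-boundary marker is mapped back to plain spaces.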
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
| 652 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
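    # Hedged example (made-up items): values 60/100/120 with weights 10/20/30
    # under capacity 50 -> items 0 and 1 taken whole, 2/3 of item 2, total 240.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))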
| 652 | 1 |
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)
def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0
def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
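# Quick intuition for the predicate above: a lone Node is full (zero children),
# while any node with exactly one child makes the whole tree non-full.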
def main() -> None:  # Main function for testing.
    # Build a small example tree.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 491 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
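    # The recursive branch above is a Laplace (cofactor) expansion along the
    # first row, det(A) = sum_j A[0][j] * C[0][j]; it costs O(n!) and is meant
    # only for the small matrices this class targets.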
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()
def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ):
if (row + column) % 2 == 0:
return self.get_minor(UpperCAmelCase__ , UpperCAmelCase__ )
return -1 * self.get_minor(UpperCAmelCase__ , UpperCAmelCase__ )
    def minors( self ):
        return Matrix(
            [
                [self.get_minor(row , column ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def cofactors( self ):
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate( self ):
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(values )
    def inverse( self ):
        determinant = self.determinant()
if not determinant:
raise TypeError("Only matrices with a non-zero determinant have an inverse" )
return self.adjugate() * (1 / determinant)
def __repr__( self ):
return str(self.rows )
def __str__( self ):
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"[" + ". ".join([str(UpperCAmelCase__ ) for value in row] ) + ".]"
for row in self.rows
] )
+ "]"
)
    def add_row( self , row , position = None ):
        type_error = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(row , list ):
            raise type_error
        for value in row:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(row ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(row )
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column( self , column , position = None ):
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(column , list ):
            raise type_error
        for value in column:
            if not isinstance(value , (int, float) ):
                raise type_error
        if len(column ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__( self , other ):
        if not isinstance(other , Matrix ):
return NotImplemented
return self.rows == other.rows
    def __ne__( self , other ):
return not self == other
def __neg__( self ):
return self * -1
    def __add__( self , other ):
if self.order != other.order:
raise ValueError("Addition requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__( self , other ):
if self.order != other.order:
raise ValueError("Subtraction requires matrices of the same order" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__( self , other ):
        if isinstance(other , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(other , Matrix ):
if self.num_columns != other.num_rows:
raise ValueError(
"The number of columns in the first matrix must "
"be equal to the number of rows in the second" )
return Matrix(
[
                    [Matrix.dot_product(row , column ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__( self , other ):
        if not isinstance(other , int ):
raise TypeError("A Matrix can only be raised to the power of an int" )
if not self.is_square:
raise ValueError("Only square matrices can be raised to a power" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"Only invertable matrices can be raised to a negative power" )
        result = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
    def dot_product( cls , row , column ):
        return sum(row[i] * column[i] for i in range(len(row ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
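    # Hedged usage sketch (illustrative values, not from the original file):
    # a 2x2 determinant and a matrix product as quick sanity checks.
    a = Matrix([[1, 2], [3, 4]])
    b = Matrix([[5, 6], [7, 8]])
    print(a.determinant())  # -2
    print((a * b).rows)     # [[19, 22], [43, 50]]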
| 491 | 1 |
"""simple docstring"""
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang ):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = F'{src_lang}-{tgt_lang}'
lowerCamelCase = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    readme = lowerCamelCase  # `lowerCamelCase` holds the README text assembled above
    path = os.path.join(model_card_dir , """README.md""" )
    print(F'Generating {path}' )
    with open(path , """w""" , encoding="""utf-8""" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 717 |
"""simple docstring"""
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs ( graph ) -> bool:
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
for i in range(len(snake_case__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowerCAmelCase : str = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
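
# Hedged extra check (illustrative graph, not from the original file): a triangle
# contains an odd cycle, so the same routine should print False for it.
print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))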
| 533 | 0 |
"""simple docstring"""
import torch
def main ( ):
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
print(F'Successfully ran on {num_gpus} GPUs' )
if __name__ == "__main__":
main()
| 88 |
import os
def solution ( ):
    script_dir = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_dir , "triangle.txt" )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" " ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            best_above = a[i - 1][j] if j != len(a[i - 1] ) else 0
            best_above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(best_above , best_above_left )
    return max(a[-1] )
if __name__ == "__main__":
    print(solution())
 | 162 | 0 |
from PIL import Image
def change_contrast ( img , level ):
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c ) -> int:
        return int(128 + factor * (c - 128) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
 | 525 |
from __future__ import annotations
solution = []
def is_safe ( board , row , column ):
    """simple docstring"""
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve ( board , row ):
    """simple docstring"""
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard ( board ):
    """simple docstring"""
    for i in range(len(board ) ):
        for j in range(len(board ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 525 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor ( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''FlavaImageProcessor'''
    tokenizer_class = ('''BertTokenizer''', '''BertTokenizerFast''')
    def __init__(self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__(self , images = None , text = None , add_special_tokens = True , padding = False , truncation = False , max_length = None , stride = 0 , pad_to_multiple_of = None , return_image_mask = None , return_codebook_pixels = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        '''simple docstring'''
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )
        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode (self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode (self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names (self ):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class (self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor (self ):
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
 | 60 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 396 | 0 |
def z_function ( input_str : str ) -> list[int]:
    '''simple docstring'''
    z_result = [0 for i in range(len(input_str ) )]
    # initialize interval's left pointer and right pointer
    left_pointer , right_pointer = 0, 0
    for i in range(1 , len(input_str ) ):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1 , z_result[i - left_pointer] )
            z_result[i] = min_edge
        while go_next(i , z_result , input_str ):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer , right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next ( i : int , z_result : list[int] , s : str ) -> bool:
    '''simple docstring'''
    return i + z_result[i] < len(s ) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern ( pattern : str , input_str : str ) -> int:
    '''simple docstring'''
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str )
    for val in z_result:
        # if the value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern ):
            answer += 1
    return answer
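
# Hedged usage sketch (illustrative strings, not from the original file):
# "aba" occurs twice in "abacaba", so find_pattern should return 2.
assert find_pattern("aba", "abacaba") == 2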
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 716 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_ ( state_dict ):
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb ( emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys ( state_dict , expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace('moe_layer.experts.0' , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                key = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
        if "gate" in key:
            key = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
        if "fc2" in key and "experts" not in key:
            key = key.replace('.fc2.' , '.ffn.fc2.' )
        if "fc1" in key and "experts" not in key:
            key = key.replace('.fc1.' , '.ffn.fc1.' )
        if ".encoder_attn." in key:
            key = key.replace('.encoder_attn.' , '.cross_attention.' )
        if "encoder_attn_layer_norm" in key:
            key = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
        if "final_layer_norm" in key:
            key = key.replace('final_layer_norm' , 'ff_layer_norm' )
        new_dict[key] = state_dict[old_key]
    return new_dict
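
# Hedged sanity sketch (illustrative key, not from the original script): with
# expert_idx=3 the fairseq MoE key below should map onto the HF expert layout.
assert rename_fairseq_keys({"layers.0.moe_layer.experts.0.fc1.weight": 0}, 3) == {
    "layers.0.ffn.experts.expert_3.fc1.weight": 0
}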
def shard_on_the_fly ( switch_checkpoint_path , dump_path , num_experts , dtype , weights_name: str = WEIGHTS_NAME ):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )['model']
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{len(sharded_state_dicts )+1:05d}-of-???.bin''' ) )
    shared_weights = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights['shared.weight'] = shared_weights['decoder.embed_tokens.weight']
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace('.bin' , F'''-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin''' )
        temp_filename = os.path.join(dump_path , weights_name.replace('.bin' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , 'w' , encoding='utf-8' ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + '\n'
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata , index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
    model.save_pretrained(args.pytorch_dump_folder_path)
 | 224 | 0 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log ( folder_path: str ):
    # search_parent_directories=True is the upstream behaviour; the obfuscated
    # source passed the folder path here, which git.Repo does not accept.
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        '''repo_id''': str(repo ),
        '''repo_sha''': str(repo.head.object.hexsha ),
        '''repo_branch''': str(repo.active_branch ),
    }
    with open(os.path.join(folder_path , '''git_log.json''' ) , '''w''' ) as f:
        json.dump(repo_infos , f , indent=4 )
def init_gpu_params ( params ):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
        params.world_size = int(os.environ['''WORLD_SIZE'''] )
        params.n_gpu_per_node = int(os.environ['''N_GPU_NODE'''] )
        params.global_rank = int(os.environ['''RANK'''] )
# number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
# summary
    PREFIX = F'--- Global rank: {params.global_rank} - '
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def set_seed ( args ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 688 |
'''simple docstring'''
def __a ( x_points : list , y_points : list , xa : int ):
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
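
# Hedged usage sketch (illustrative points, not from the original file): the
# points lie on y = x**2, so this Neville-style interpolation at x = 5
# should give 25.0 as the first element of the result.
assert __a([1, 2, 3, 4], [1, 4, 9, 16], 5 )[0] == 25.0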
if __name__ == "__main__":
import doctest
doctest.testmod()
| 688 | 1 |
"""simple docstring"""
def set_bit ( number: int , position: int ) -> int:
    '''simple docstring'''
    return number | (1 << position)
def clear_bit ( number: int , position: int ) -> int:
    '''simple docstring'''
    return number & ~(1 << position)
def flip_bit ( number: int , position: int ) -> int:
    '''simple docstring'''
    return number ^ (1 << position)
def is_bit_set ( number: int , position: int ) -> bool:
    '''simple docstring'''
    return ((number >> position) & 1) == 1
def get_bit ( number: int , position: int ) -> int:
    '''simple docstring'''
    return int((number & (1 << position)) != 0 )
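
# Hedged usage sketch (illustrative values, not from the original file):
assert set_bit(0b101, 1) == 0b111
assert clear_bit(0b111, 2) == 0b011
assert flip_bit(0b011, 0) == 0b010
assert is_bit_set(0b101, 2) is True
assert get_bit(0b100, 1) == 0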
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 612 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor (ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names ( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class ( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self ):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 13 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_question_encoder ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
    def create_and_check_dpr_reader ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
    def prepare_config_and_inputs_for_common ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp ( self ):
        self.model_tester = TFDPRModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )
    def test_config ( self ):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )
    def test_dpr_question_encoder_model ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )
    def test_dpr_reader_model ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )
    @slow
    def test_model_from_pretrained ( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class TFDPRModelIntegrationTest ( unittest.TestCase ):
    @slow
    def test_inference_no_head ( self ):
        model = TFDPRQuestionEncoder.from_pretrained("""facebook/dpr-question_encoder-single-nq-base""" )
        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids )[0] # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 644 | 0 |
"""simple docstring"""
def odd_even_sort ( input_list : list ):
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False: # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ): # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ): # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
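
# Hedged usage sketch (illustrative list, not from the original file):
assert odd_even_sort([5, 3, 1, 4, 2] ) == [1, 2, 3, 4, 5]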
if __name__ == "__main__":
print('Enter list to be sorted')
    input_list = [int(x) for x in input().split()]
# inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 705 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , out_channels: int , kernel_size: int = 3 , stride: int = 1 , groups: int = 1 , activation: Optional[str] = "relu" , **kwargs , ):
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding='''VALID''' , groups=groups , use_bias=False , name='''convolution''' , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call( self , hidden_state ):
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , **kwargs ):
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
    def call( self , pixel_values ):
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , out_channels: int , stride: int = 2 , **kwargs ):
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name='''convolution''' )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
    def call( self , inputs: tf.Tensor , training: bool = False ):
        return self.normalization(self.convolution(inputs ) , training=training )
class TFRegNetSELayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , in_channels: int , reduced_channels: int , **kwargs ):
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='''pooler''' )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
        ]
    def call( self , hidden_state ):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='''layer.2''' ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 1 , **kwargs ):
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='''shortcut''' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='''layer.1''' ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='''layer.3''' ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self , hidden_state ):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , in_channels: int , out_channels: int , stride: int = 2 , depth: int = 2 , **kwargs ):
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name='''layers.0''' ),
            *[layer(config , out_channels , out_channels , name=f'layers.{i+1}' ) for i in range(depth - 1 )],
        ]
    def call( self , hidden_state ):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config: RegNetConfig , **kwargs ):
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=f'stages.{i+1}' ) )
    def call( self , hidden_state: tf.Tensor , output_hidden_states: bool = False , return_dict: bool = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    config_class = RegNetConfig
    def __init__( self , config , **kwargs ):
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name='''embedder''' )
        self.encoder = TFRegNetEncoder(config , name='''encoder''' )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='''pooler''' )
    @unpack_inputs
    def call( self , pixel_values: tf.Tensor , output_hidden_states: Optional[bool] = None , return_dict: Optional[bool] = None , training: bool = False , ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    """The bare RegNet model outputting raw features without any specific head on top.""",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
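

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the original module. It builds a randomly
# initialized classification model from a default config; assumes RegNetConfig
# is imported at the top of this file, as the type hints above suggest.
if __name__ == "__main__":
    config = RegNetConfig()
    model = TFRegNetForImageClassification(config)
    dummy_pixel_values = tf.ones((1, config.num_channels, 224, 224), dtype=tf.float32)  # NCHW layout
    logits = model(dummy_pixel_values).logits
    print("logits shape:", logits.shape)  # (1, config.num_labels)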
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
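

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only): building a config with the default
# ResNet backbone and round-tripping it through `to_dict`.
if __name__ == "__main__":
    config = UperNetConfig()
    as_dict = config.to_dict()
    assert as_dict["model_type"] == "upernet"
    assert as_dict["backbone_config"]["model_type"] == "resnet"
    print(config.pool_scales, config.loss_ignore_index)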
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config once; it is reused by every test via `_launch_args`.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ".split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
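

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only): initializing and applying the
# residual block. Shapes are NHWC, as the `jax.image.resize` call above implies;
# the time-embedding width of 128 is an arbitrary example.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.ones((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.ones((1, 128))    # projected to out_channels inside the block
    params = block.init(rng, x, temb)
    out = block.apply(params, x, temb)
    print(out.shape)  # (1, 8, 8, 64)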
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["""MobileViTFeatureExtractor"""]
a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
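
# ---------------------------------------------------------------------------
# Design note (not part of the original module): binding `sys.modules[__name__]`
# to a `_LazyModule` means the heavy torch/TF submodules are only imported when
# one of their attributes is first accessed, e.g. (illustration only):
#
#     from transformers import MobileViTConfig   # cheap, config module only
#     from transformers import MobileViTModel    # triggers the torch submodule import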
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
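

# ---------------------------------------------------------------------------
# Running just the slow integration check (illustration only; requires TF and
# network access; the file path assumes the usual repository layout):
#
#     RUN_SLOW=1 python -m pytest tests/models/flaubert/test_modeling_tf_flaubert.py -k integration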
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update an element in log(N) time."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Query the range [i, j] in log(N) time."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
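
    # Additional sketch (not in the original demo): any associative reducer
    # works, e.g. string concatenation for range joins.
    words = SegmentTree(["a", "b", "c", "d"], lambda left, right: left + right)
    print(words.query_range(1, 3))  # -> "bcd"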
'''simple docstring'''
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of the even-valued Fibonacci terms not exceeding `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'{solution() = }')
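

# Alternative sketch (not in the original): step directly through the even
# Fibonacci numbers via the identity E(k) = 4*E(k-1) + E(k-2), which avoids
# the parity test entirely.
def solution_even_only(n: int = 4_000_000) -> int:
    total, prev, curr = 0, 2, 8  # first two even Fibonacci numbers
    while prev <= n:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total


if __name__ == "__main__":
    assert solution_even_only() == solution()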
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
A_ : Union[str, Any] = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
        # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
        # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
        # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function"""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
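

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only; "path/to/sentencepiece.bpe.model" is
# a placeholder for a real NLLB SentencePiece file):
#
#     tok = NllbTokenizer("path/to/sentencepiece.bpe.model", src_lang="eng_Latn")
#     batch = tok._build_translation_inputs(
#         "Hello", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     # batch["forced_bos_token_id"] is the id of the target language code,
#     # which `generate` uses to force the first decoded token.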
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
"""simple docstring"""
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """Capitalizes the first letter of `sentence`, leaving the rest unchanged."""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
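
    # A few illustrative checks (not in the original):
    assert capitalize("hello world") == "Hello world"
    assert capitalize("123 hello") == "123 hello"
    assert capitalize("") == ""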
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
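

# Example invocation (illustration only; the checkpoint file name mirrors
# ACCEPTABLE_CHECKPOINTS, the script name and output directory are hypothetical):
#
#     python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa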
if __name__ == "__main__":
__a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
__a : Optional[Any] = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)


import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )

        self.config = config

        self.set_stage_and_offload()
    def set_stage_and_offload( self ) -> None:
        '''simple docstring'''
        self._stage = self.get_value('''zero_optimization.stage''' , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(['''cpu''', '''nvme'''] )
            offload_devices = set(
                [
                    self.get_value('''zero_optimization.offload_optimizer.device''' ),
                    self.get_value('''zero_optimization.offload_param.device''' ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ) -> tuple:
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self , ds_key_long , default=None ):
        '''simple docstring'''
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree( self , ds_key_long , must_exist=False ) -> None:
        '''simple docstring'''
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split('''.''' )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(F"Can't find {ds_key_long} entry in the config: {self.config}" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ) -> bool:
        '''simple docstring'''
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )

    def is_false( self , ds_key_long ) -> bool:
        '''simple docstring'''
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ) -> bool:
        '''simple docstring'''
        return self._stage == 2

    def is_zero3( self ) -> bool:
        '''simple docstring'''
        return self._stage == 3

    def is_offload( self ) -> bool:
        '''simple docstring'''
        return self._offload
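# Usage sketch (added for illustration): dotted keys walk the nested JSON
# config, so stage and offload checks need no manual traversal.
#   cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3}})
#   cfg.get_value("zero_optimization.stage")  # -> 3
#   cfg.is_zero3()                            # -> True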
class DeepSpeedEngineWrapper:
    """simple docstring"""
    def __init__( self , engine ) -> None:
        '''simple docstring'''
        self.engine = engine

    def backward( self , loss , **kwargs ) -> None:
        '''simple docstring'''
        self.engine.backward(loss , **kwargs )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper( AcceleratedOptimizer ):
    """simple docstring"""
    def __init__( self , optimizer ) -> None:
        '''simple docstring'''
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , '''overflow''' )

    def zero_grad( self , set_to_none=None ) -> None:
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step( self ) -> None:
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped( self ) -> bool:
        '''simple docstring'''
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper( AcceleratedScheduler ):
    """simple docstring"""
    def __init__( self , scheduler , optimizers ) -> None:
        '''simple docstring'''
        super().__init__(scheduler , optimizers )

    def step( self ) -> None:
        '''simple docstring'''
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim:
    """simple docstring"""
    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ) -> None:
        '''simple docstring'''
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler:
    """simple docstring"""
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ) -> None:
        '''simple docstring'''
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
 | 522 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 9 |
"""simple docstring"""
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
| 65 | 0 |
"""simple docstring"""
import math
def check_partition_perfect(positive_integer: int ) -> bool:
    '''simple docstring'''
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )

def solution(max_proportion: float = 1 / 1_2345 ) -> int:
    '''simple docstring'''
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
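    # Quick sanity checks (added for illustration): k = 2 is a perfect
    # partition value since 4**1 = 2**1 + 2, while k = 6 (an integer
    # candidate from integer = 5: (5**2 - 1) / 4 = 6) is not perfect.
    assert check_partition_perfect(2)
    assert not check_partition_perfect(6)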
| 714 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor( ProcessorMixin ):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor , tokenizer ) -> None:
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images = None , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_token_type_ids = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
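# Usage sketch (added; "Salesforce/blip-image-captioning-base" is a public
# checkpoint used here only as an example — treat it as an assumption):
#   from PIL import Image
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")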
| 579 | 0 |
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list , k: int ) -> float:
    if len(array ) < k or k < 0:
        raise ValueError("""Invalid Input""" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
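    # Worked illustration (added): each window step drops the leaving element
    # and adds the entering one, so the whole scan stays O(n):
    #   [1, 2, 3, 4] with k = 2 -> window sums 3, 5, 7 -> result 7
    assert max_sum_in_array([1, 2, 3, 4], 2) == 7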
| 155 |
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 155 | 1 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    '''simple docstring'''
    def get_winner( self , weights , sample ) -> int:
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample ) ):
            d0 += math.pow((sample[i] - weights[0][i]) , 2 )
            d1 += math.pow((sample[i] - weights[1][i]) , 2 )
            return 0 if d0 > d1 else 1
        return 0

    def update( self , weights , sample , j , alpha ) -> list:
        for i in range(len(weights ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    """simple docstring"""
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(f'Clusters that the test sample belongs to : {winner}' )
    print(f'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
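# Note (added): the update step is plain competitive learning — the winning
# cluster's weight vector moves a fraction alpha of the way toward the sample:
#   w_winner[i] <- w_winner[i] + alpha * (sample[i] - w_winner[i])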
| 711 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
UpperCAmelCase =R"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class FlaxLogitsProcessor:
    '''simple docstring'''
    @add_start_docstrings(UpperCAmelCase )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsWarper:
    '''simple docstring'''
    @add_start_docstrings(UpperCAmelCase )
    def __call__( self , input_ids , scores ) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class FlaxLogitsProcessorList( list ):
    '''simple docstring'''
    @add_start_docstrings(UpperCAmelCase )
    def __call__( self , input_ids , scores , cur_len , **kwargs ) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys() )} for '
                        f'{processor.__class__} are passed to the logits processor.' )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper( FlaxLogitsWarper ):
    '''simple docstring'''
    def __init__( self , temperature ) -> None:
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
        self.temperature = temperature

    def __call__( self , input_ids , scores , cur_len ) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper( FlaxLogitsWarper ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ = -float("""Inf""" ) ,lowerCamelCase_ = 1 ) -> Dict:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
A = top_p
A = filter_value
A = min_tokens_to_keep
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A , A = lax.top_k(lowerCamelCase_ ,scores.shape[-1] )
A = jnp.full_like(lowerCamelCase_ ,self.filter_value )
A = jax.nn.softmax(lowerCamelCase_ ,axis=-1 ).cumsum(axis=-1 )
A = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
A = jnp.roll(lowerCamelCase_ ,1 )
score_mask |= score_mask.at[:, 0].set(lowerCamelCase_ )
# min tokens to keep
A = score_mask.at[:, : self.min_tokens_to_keep].set(lowerCamelCase_ )
A = jnp.where(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
A = jax.lax.sort_key_val(lowerCamelCase_ ,lowerCamelCase_ )[-1]
return next_scores
class FlaxTopKLogitsWarper( FlaxLogitsWarper ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ = -float("""Inf""" ) ,lowerCamelCase_ = 1 ) -> List[Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
A = max(lowerCamelCase_ ,lowerCamelCase_ )
A = filter_value
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A , A = scores.shape
A = jnp.full(batch_size * vocab_size ,self.filter_value )
A = min(self.top_k ,scores.shape[-1] ) # Safety check
A , A = lax.top_k(lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.broadcast_to((jnp.arange(lowerCamelCase_ ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
A = topk_scores.flatten()
A = topk_indices.flatten() + shift
A = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase_ )
A = next_scores_flat.reshape(lowerCamelCase_ ,lowerCamelCase_ )
return next_scores
class FlaxForcedBOSTokenLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> List[Any]:
A = bos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.bos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]:
A = max_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.eos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class FlaxMinLengthLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
A = min_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
A = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
A = jnp.where(lowerCamelCase_ ,scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
A = list(lowerCamelCase_ )
A = begin_index
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Dict:
A = 1 - jnp.bool_(cur_len - self.begin_index )
A = jnp.where(lowerCamelCase_ ,scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class FlaxSuppressTokensLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> str:
A = list(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class FlaxForceTokensLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> Union[str, Any]:
A = dict(lowerCamelCase_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
A = jnp.ones((max(force_token_map.keys() ) + 1) ,dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
A = force_token_array.at[index].set(lowerCamelCase_ )
A = jnp.intaa(lowerCamelCase_ )
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
def _force_token(lowerCamelCase_ ):
A = scores.shape[0]
A = self.force_token_array[generation_idx]
A = jnp.ones_like(lowerCamelCase_ ,dtype=scores.dtype ) * -float("""inf""" )
A = jnp.zeros((batch_size, 1) ,dtype=scores.dtype )
A = lax.dynamic_update_slice(lowerCamelCase_ ,lowerCamelCase_ ,(0, current_token) )
return new_scores
A = lax.cond(
cur_len >= self.force_token_array.shape[0] ,lambda: scores ,lambda: lax.cond(
self.force_token_array[cur_len] >= 0 ,lambda: _force_token(lowerCamelCase_ ) ,lambda: scores ,) ,)
return scores
class FlaxWhisperTimeStampLogitsProcessor( FlaxLogitsProcessor ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[int]:
A = generate_config.eos_token_id
A = generate_config.no_timestamps_token_id
A = generate_config.no_timestamps_token_id + 1
A = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(lowerCamelCase_ ,"""max_initial_timestamp_index""" ):
A = generate_config.max_initial_timestamp_index
else:
A = model_config.vocab_size
if self.max_initial_timestamp_index is None:
A = model_config.vocab_size
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[Any]:
# suppress <|notimestamps|> which is handled by without_timestamps
A = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(lowerCamelCase_ ,lowerCamelCase_ ):
A = jnp.where((cur_len - self.begin_index) >= 1 ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin ,True and last_was_timestamp ,lowerCamelCase_ ,)
A = jnp.where((cur_len - self.begin_index) < 2 ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin ,lowerCamelCase_ ,lowerCamelCase_ ,)
return jnp.where(
lowerCamelCase_ ,jnp.where(
penultimate_was_timestamp > 0 ,scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) ,scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) ,) ,lowerCamelCase_ ,)
A = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(cur_len == self.begin_index ,lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.where(
self.max_initial_timestamp_index is not None ,True and apply_max_initial_timestamp ,lowerCamelCase_ ,)
A = self.timestamp_begin + self.max_initial_timestamp_index
A = jnp.where(
lowerCamelCase_ ,scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
# if sum of probability over timestamps is above any other token, sample timestamp
A = jax.nn.log_softmax(lowerCamelCase_ ,axis=-1 )
def handle_cumulative_probs(lowerCamelCase_ ,lowerCamelCase_ ):
A = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] ,axis=-1 )
A = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob ,scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) ,lowerCamelCase_ ,)
A = jax.vmap(lowerCamelCase_ )(lowerCamelCase_ ,lowerCamelCase_ )
return scores
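# Usage sketch (added): processors compose left-to-right through the list,
# each one receiving and returning (batch_size, vocab_size) score arrays:
#   processors = FlaxLogitsProcessorList(
#       [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#   )
#   scores = processors(input_ids, scores, cur_len)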
| 255 | 0 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table ):
    '''simple docstring'''
    rows , columns = np.shape(table )
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f'{rows}x{columns} array:\n{table}'
        )
        raise ValueError(msg )
    lower = np.zeros((rows, columns) )
    upper = np.zeros((rows, columns) )
    for i in range(columns ):
        for j in range(i ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists" )
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i , columns ):
            total = sum(lower[i][k] * upper[k][j] for k in range(j ) )
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
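    # Worked example (added): Doolittle LU of a 2x2 matrix — L keeps a unit
    # diagonal and U carries the pivots:
    #   [[4, 3],     [[1.0, 0.0],     [[4.0,  3.0],
    #    [6, 3]]  =   [1.5, 1.0]]  @   [0.0, -1.5]]
    lower, upper = lower_upper_decomposition(np.array([[4, 3], [6, 3]]))
    assert np.allclose(lower @ upper, np.array([[4, 3], [6, 3]]))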
| 115 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 115 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config( self , **kwargs ) -> dict:
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs )
        return config
def _snake_case ( self : Dict ) -> Optional[Any]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def _snake_case ( self : Union[str, Any] ) -> List[str]:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case__ , beta_end=snake_case__ )
def _snake_case ( self : List[Any] ) -> Optional[int]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case__ )
def _snake_case ( self : Any ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case__ )
def _snake_case ( self : Any ) -> List[str]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case__ )
def _snake_case ( self : List[Any] ) -> Optional[int]:
self.check_over_configs(thresholding=snake_case__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case__ , prediction_type=snake_case__ , sample_max_value=snake_case__ , )
def _snake_case ( self : List[str] ) -> Dict:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case__ )
def _snake_case ( self : str ) -> Tuple:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=snake_case__ )
def _snake_case ( self : Optional[Any] ) -> Union[str, Any]:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**snake_case__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def _snake_case ( self : Optional[Any] ) -> List[Any]:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**snake_case__ )
_lowerCamelCase = len(snake_case__ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
_lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCamelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(snake_case__ ) )
_lowerCamelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3372 ) < 1e-3
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCamelCase = scheduler_class(**snake_case__ )
_lowerCamelCase = len(snake_case__ )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter
_lowerCamelCase = torch.manual_seed(0 )
for t in reversed(range(snake_case__ ) ):
# 1. predict noise residual
_lowerCamelCase = model(snake_case__ , snake_case__ )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(snake_case__ ) )
_lowerCamelCase = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2631 ) < 1e-3
def _snake_case ( self : int ) -> Any:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**snake_case__ )
_lowerCamelCase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
_lowerCamelCase = scheduler.timesteps
for i, timestep in enumerate(snake_case__ ):
if i == len(snake_case__ ) - 1:
_lowerCamelCase = -1
else:
_lowerCamelCase = timesteps[i + 1]
_lowerCamelCase = scheduler.previous_timestep(snake_case__ )
_lowerCamelCase = prev_t.item()
self.assertEqual(snake_case__ , snake_case__ )
def _snake_case ( self : Tuple ) -> List[str]:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**snake_case__ )
_lowerCamelCase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(snake_case__ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case__ )
def _snake_case ( self : int ) -> Dict:
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**snake_case__ )
_lowerCamelCase = [1_0_0, 8_7, 5_0, 1, 0]
_lowerCamelCase = len(snake_case__ )
with self.assertRaises(snake_case__ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
    def test_custom_timesteps_too_large( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=timesteps )
 | 234 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
 | 234 | 1 |
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER( datasets.Metric ):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
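# Worked example (added): for the docstring inputs, "this is the prediction"
# vs reference "this is the reference" gives S=1, D=0, I=0 over N=4 words;
# under one alignment the second pair contributes 3 errors over 4 reference
# words, so the corpus WER is (1 + 3) / (4 + 4) = 0.5.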
 | 434 |
'''simple docstring'''
def and_gate(input_1: int , input_2: int ) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0 ) == 0 )
def test_and_gate( ) -> None:
"""simple docstring"""
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 244 | 0 |
'''simple docstring'''
def is_even(number: int ) -> bool:
    return number & 1 == 0
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 178 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_time_series_transformer'''] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
 | 178 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester( unittest.TestCase ):
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape ), expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
@slow
    def test_inference_no_head( self ):
        """simple docstring"""
        model = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''', from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]], dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4 ) )
| 431 |
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float('''-inf''' )
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num , curr_sum + num )
        max_sum = max(max_sum , curr_sum )
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f"{max_subarray_sum(nums) = }")
| 431 | 1 |
'''simple docstring'''
def nand_gate(input_1: int , input_2: int ) -> int:
    return int((input_1, input_2).count(0 ) != 0 )
def test_nand_gate( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
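# Illustration (added): NAND is functionally complete — the other basic gates
# can be rebuilt from nand_gate alone, e.g.:
def not_via_nand(input_1: int) -> int:
    return nand_gate(input_1, input_1)
def and_via_nand(input_1: int, input_2: int) -> int:
    return not_via_nand(nand_gate(input_1, input_2))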
 | 720 |
def hex_to_bin(hex_num: str ) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
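    # Examples (added for illustration):
    #   hex_to_bin("AC")  -> 10101100
    #   hex_to_bin("-fc") -> -11111100
    assert hex_to_bin("AC") == 10101100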
| 246 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ):
    """simple docstring"""
    log_outputs = args.log_outputs
    dataset_id = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] )
    # load metrics
    wer = load_metric('''wer''' )
    cer = load_metric('''cer''' )
    # compute metrics
    wer_result = wer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    cer_result = cer.compute(references=result['''target'''] , predictions=result['''prediction'''] )
    # print & log results
    result_str = F'WER: {wer_result}\nCER: {cer_result}'
    print(result_str )
    with open(F'{dataset_id}_eval_results.txt' , '''w''' ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'log_{dataset_id}_predictions.txt'
        target_file = F'log_{dataset_id}_targets.txt'
        with open(pred_file , '''w''' ) as p, open(target_file , '''w''' ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F'{i}' + '''\n''' )
                p.write(batch['''prediction'''] + '''\n''' )
                t.write(F'{i}' + '''\n''' )
                t.write(batch['''target'''] + '''\n''' )
            result.map(write_to_file , with_indices=True )
def normalize_text(text: str ) -> str:
    """simple docstring"""
    chars_to_ignore_regex = '''[,?.!\-\;\:"“%‘”�—’…–]'''  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , '''''' , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['''\n\n''', '''\n''', '''   ''', '''  ''']
    for t in token_sequences_to_ignore:
        text = ''' '''.join(text.split(t ) )
    return text
def main(args ):
    """simple docstring"""
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('''audio''' , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('''automatic-speech-recognition''' , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch['''audio''']['''array'''] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch['''prediction'''] = prediction['''text''']
        batch['''target'''] = normalize_text(batch['''sentence'''] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
 | 94 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor

class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 1
    @register_to_config
    def __init__( self , num_train_timesteps = 20_00 , snr = 0.15 , sigma_min = 0.01 , sigma_max = 13_48.0 , sampling_eps = 1E-5 , correct_steps = 1 , ) -> None:
        """simple docstring"""
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps , sigma_min , sigma_max , sampling_eps)
    def scale_model_input( self , sample , timestep = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps( self , num_inference_steps , sampling_eps = None , device = None) -> None:
        """simple docstring"""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1 , sampling_eps , num_inference_steps , device=device)
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] =sigma_min if sigma_min is not None else self.config.sigma_min
UpperCamelCase__ : Optional[Any] =sigma_max if sigma_max is not None else self.config.sigma_max
UpperCamelCase__ : Optional[int] =sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any =sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCamelCase__ : Optional[int] =torch.exp(torch.linspace(math.log(__SCREAMING_SNAKE_CASE) , math.log(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE))
UpperCamelCase__ : Any =torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def UpperCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) -> Optional[Any]:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self) -> int:
        return self.config.num_train_timesteps
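# A minimal predictor-corrector sampling loop, sketched as comments. `model`
# below is a hypothetical score network; names and shapes are illustrative
# assumptions, not part of this module:
#
#     scheduler = ScoreSdeVeScheduler()
#     scheduler.set_sigmas(num_inference_steps=1000)
#     sample = torch.randn(1, 3, 256, 256) * scheduler.init_noise_sigma
#     for i, t in enumerate(scheduler.timesteps):
#         sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#         for _ in range(scheduler.config.correct_steps):
#             model_output = model(sample, sigma_t)  # hypothetical call
#             sample = scheduler.step_correct(model_output, sample).prev_sample
#         model_output = model(sample, sigma_t)  # hypothetical call
#         sample = scheduler.step_pred(model_output, t, sample).prev_sample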
| 582 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 582 | 1 |
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """Base output class for a scheduler's `step` function."""

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)
    @property
    def compatibles(self):
        """Returns all scheduler classes that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split('.')[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
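# A minimal sketch of the intended call pattern (assuming a diffusers-style
# checkpoint layout; the repo id below is illustrative, not a real checkpoint):
#
#     scheduler = SomeScheduler.from_pretrained("some/repo", subfolder="scheduler")
#     scheduler.save_pretrained("./local_scheduler")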
| 3 |
'''simple docstring'''
demo_graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
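# The graph is stored as an adjacency list (dict of node -> list of neighbours).
# Because BFS expands paths level by level, the first path that reaches `goal`
# is guaranteed to be a shortest path in this unweighted graph.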
def bfs_shortest_path(graph: dict, start: str, goal: str):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start: str, target: str):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, 'G', 'D')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, 'G', 'D')) # returns 4
| 3 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["ViTFeatureExtractor"]
__UpperCAmelCase = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTForImageClassification",
"ViTForMaskedImageModeling",
"ViTModel",
"ViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"TFViTForImageClassification",
"TFViTModel",
"TFViTPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"FlaxViTForImageClassification",
"FlaxViTModel",
"FlaxViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 259 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA')
    else:
        device = 'cpu'
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae')
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 2_5, 2_5).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / 'vae_decoder' / 'model.onnx',
        ordered_input_names=['latent_sample', 'return_dict'],
        output_names=['sample'],
        dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=1_4,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 259 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 307 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location='''cpu''')['''model''']
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = '''first_stage_model.'''
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, '''''')] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = '''model.diffusion_model.'''
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, '''''')] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule='''scaled_linear''',
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,  # value reconstructed; the original literal was obfuscated
    )
    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 248 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'feature request',
'wip',
]
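# Issues carrying any of the labels above are never auto-closed; everything
# else is closed after 7 quiet days following a bot ping (or marked stale after
# 23 quiet days) once the issue is at least 30 days old — see the logic below.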
def main():
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/accelerate''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''')
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''')
if __name__ == "__main__":
main()
| 715 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
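# Adaptive softmax (Grave et al., 2017) splits the vocabulary at `cutoffs` into
# a frequent "head" plus rarer tail clusters; each tail cluster is entered via
# a cluster logit appended to the head, and `div_val` shrinks the embedding
# width of successive clusters to spend less compute on rare tokens.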
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer='''zeros''', trainable=True, name='''cluster_weight''')
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer='''zeros''', trainable=True, name='''cluster_bias''')

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer='''zeros''', trainable=True, name=f'out_projs_._{i}', )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer='''zeros''', trainable=True, name=f'out_layers_._{i}_._weight', )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer='''zeros''', trainable=True, name=f'out_layers_._{i}_._bias', )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer='''zeros''', trainable=True, name=f'out_projs_._{i}')
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer='''zeros''', trainable=True, name=f'out_layers_._{i}_._weight', )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer='''zeros''', trainable=True, name=f'out_layers_._{i}_._bias', )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum('''ibd,ed->ibe''', y, proj)
        return tf.einsum('''ibd,nd->ibn''', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation='''mean''' if return_mean else '''''')
        return out
| 350 | 0 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
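# Full determinism pins random seeds and forces deterministic kernels so the
# hard-coded expected slices below stay reproducible across runs.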
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        '''num_images_per_prompt''',
        '''latents''',
        '''callback''',
        '''callback_steps''',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE: the original flag name was obfuscated; `test_cpu_offload` is a reconstruction.
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1E-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3E-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3E-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type='''numpy''').images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 271 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    '''b0''': efficientnet.EfficientNetB0,
    '''b1''': efficientnet.EfficientNetB1,
    '''b2''': efficientnet.EfficientNetB2,
    '''b3''': efficientnet.EfficientNetB3,
    '''b4''': efficientnet.EfficientNetB4,
    '''b5''': efficientnet.EfficientNetB5,
    '''b6''': efficientnet.EfficientNetB6,
    '''b7''': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'''b0''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 2_2_4,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1_2_8_0,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 2_4_0,
'''dropout_rate''': 0.2,
'''dw_padding''': [1_6],
},
'''b2''': {
'''hidden_dim''': 1_4_0_8,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 2_6_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 1_6],
},
'''b3''': {
'''hidden_dim''': 1_5_3_6,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 3_0_0,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 1_8],
},
'''b4''': {
'''hidden_dim''': 1_7_9_2,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 3_8_0,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2_0_4_8,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 4_5_6,
'''dropout_rate''': 0.4,
'''dw_padding''': [1_3, 2_7],
},
'''b6''': {
'''hidden_dim''': 2_3_0_4,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 5_2_8,
'''dropout_rate''': 0.5,
'''dw_padding''': [3_1],
},
'''b7''': {
'''hidden_dim''': 2_5_6_0,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 6_0_0,
'''dropout_rate''': 0.5,
'''dw_padding''': [1_8],
},
}
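# Each entry follows EfficientNet's compound scaling: width_coef/depth_coef
# scale channel counts and layer counts, while image_size and dropout_rate grow
# with model capacity; dw_padding lists depthwise-conv blocks that need extra
# padding (interpretation assumed from the conversion logic, not stated here).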
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1_000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,  # value reconstructed; the original literal was obfuscated
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"""Pushing converted {model_name} to the hub...""")
        model_name = f"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 496 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json',
}
class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'bit'
    layer_types = ['preactivation', 'bottleneck']
    supported_padding = ['SAME', 'VALID']

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[2_56, 5_12, 10_24, 20_48],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(F"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [F"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 164 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 164 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class CTRLConfig(PretrainedConfig):
    model_type = '''ctrl'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__(
        self,
        vocab_size=2_4_6_5_3_4,
        n_positions=2_5_6,
        n_embd=1_2_8_0,
        dff=8_1_9_2,
        n_layer=4_8,
        n_head=1_6,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.0_2,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 632 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 632 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase )-> None:
lowercase__ = value
lowercase__ = None
lowercase__ = None
class __A :
"""simple docstring"""
def __init__( self , _lowerCamelCase )-> None:
lowercase__ = tree
def snake_case_( self , _lowerCamelCase )-> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self )-> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709 |
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    """Estimates pi by sampling random points in the square [-1, 1] x [-1, 1]
    and counting the fraction that land inside the inscribed unit circle."""

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations))
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(F'''The estimated value of pi is {pi_estimate}''')
    print(F'''The numpy value of pi is {pi}''')
    print(F'''The total error is {abs(pi - pi_estimate)}''')
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)) * (max_value - min_value)
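# Monte Carlo integration: for U ~ Uniform(a, b),
#   E[f(U)] = (1 / (b - a)) * integral_a^b f(x) dx,
# so (b - a) * mean(f(U_1), ..., f(U_n)) converges to the integral as n grows.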
def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('''******************''')
    print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''')
    print(F'''Estimated value is {estimated_value}''')
    print(F'''Expected value is {expected_value}''')
    print(F'''Total error is {abs(estimated_value - expected_value)}''')
    print('''******************''')
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0)

    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(F'''Estimated value is {estimated_value}''')
    print(F'''Expected value is {pi}''')
    print(F'''Total error is {abs(estimated_value - pi)}''')
    print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
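# GLPN-style preprocessing: instead of resizing to a fixed square, images are
# rounded *down* to the nearest multiple of `size_divisor` on each side so the
# encoder's strided stages divide the input evenly.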
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :List[str] = ['pixel_values']
def __init__( self : Union[str, Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Optional[Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = size_divisor
lowerCAmelCase__ = resample
super().__init__(**__magic_name__ )
    def resize( self , image : np.ndarray , size_divisor : int , resample , data_format : Optional[ChannelDimension] = None , **kwargs ):
        """simple docstring"""
        height , width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[ChannelDimension] = None , **kwargs ):
        """simple docstring"""
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
    def preprocess( self , images : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , do_resize : Optional[bool] = None , size_divisor : Optional[int] = None , resample=None , do_rescale : Optional[bool] = None , return_tensors : Optional[Union[TensorType, str]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError("Invalid image(s)" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img ) for img in images]
        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image , scale=1 / 255 ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
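# Added sketch (not in the original file): the size_divisor rounding that the
# resize step above applies, shown standalone with illustrative values.
_demo_height, _demo_width, _demo_divisor = 250, 333, 32
assert _demo_height // _demo_divisor * _demo_divisor == 224  # rounded down to a multiple of 32
assert _demo_width // _demo_divisor * _demo_divisor == 320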
| 48 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(""" """ ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch( input_ids , pad_token_id , attention_mask=None , ):
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset( Dataset ):
    def __init__(self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        '''simple docstring'''
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + """.source""" )
        self.tgt_file = Path(data_dir ).joinpath(type_path + """.target""" )
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F'''found empty line in {self.src_file}'''
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
def __len__(self ):
'''simple docstring'''
return len(self.src_lens )
    def __getitem__(self , index ):
        '''simple docstring'''
        index = index + 1 # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip("""\n""" )
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip("""\n""" )
        assert source_line, F'''empty source line for index {index}'''
        assert tgt_line, F'''empty tgt line for index {index}'''
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        tgt_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , """right""" )
        target_inputs = encode_line(tgt_tokenizer , tgt_line , self.max_target_length , """right""" )
        source_ids = source_inputs["""input_ids"""].squeeze()
        target_ids = target_inputs["""input_ids"""].squeeze()
        src_mask = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
    def get_char_lens(data_file ):
        '''simple docstring'''
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn(self , batch ):
        '''simple docstring'''
        input_ids = torch.stack([x["""input_ids"""] for x in batch] )
        masks = torch.stack([x["""attention_mask"""] for x in batch] )
        target_ids = torch.stack([x["""decoder_input_ids"""] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            """input_ids""": source_ids,
            """attention_mask""": source_mask,
            """decoder_input_ids""": y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list( nested : List[List] ):
    return list(itertools.chain.from_iterable(nested ) )
def save_git_info( folder_path : str ):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json""" ) )
def save_json( content , path , indent=4 , **json_dump_kwargs ):
    with open(path , """w""" ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json( path ):
    with open(path ) as f:
        return json.load(f )
def get_git_info():
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        """repo_id""": str(repo ),
        """repo_sha""": str(repo.head.object.hexsha ),
        """repo_branch""": str(repo.active_branch ),
        """hostname""": str(socket.gethostname() ),
    }
    return repo_infos
def lmap( f : Callable , x : Iterable ):
    return list(map(f , x ) )
def pickle_save( obj , path ):
    with open(path , """wb""" ) as f:
        return pickle.dump(obj , f )
def normalize_answer( s ):
    def remove_articles(text ):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score( prediction , ground_truth ):
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score( prediction , ground_truth ):
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match( output_lns : List[str] , reference_lns : List[str] ):
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model( model_prefix ):
    return model_prefix.startswith("""rag""" )
def set_extra_model_params( extra_params , hparams , config ):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info("""config doesn't have a `{}` attribute""".format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
return hparams, config
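# Added sketch (not in the original file): the token-overlap F1 computed by
# f1_score above, written out with illustrative strings; whitespace splitting
# mirrors what normalize_answer produces.
_pred = "the cat sat".split()
_gold = "a cat sat down".split()
_common = Counter(_pred) & Counter(_gold)                # {'cat': 1, 'sat': 1}
_num_same = sum(_common.values())                        # 2
_precision, _recall = _num_same / len(_pred), _num_same / len(_gold)
_f1 = 2 * _precision * _recall / (_precision + _recall)  # ~0.571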
| 581 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    '''simple docstring'''
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ):
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ):
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids , decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = 99
    def _get_config_and_data( self ):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
    def test_lm_forward( self ):
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def test_lm_uneven_forward( self ):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def test_shift_tokens_right( self ):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
'''simple docstring'''
    is_encoder_decoder = True
    all_model_classes = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ):
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
def lowerCAmelCase_ ( self : Optional[int] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
_A = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any]=None , **_UpperCAmelCase : Optional[int] ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
_A = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_A = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCAmelCase_ ( self : Optional[Any] ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A = model_class(_UpperCAmelCase )
_A = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_A = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('JIT Enabled' ):
_A = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_A = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
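# Added sketch (not in the original file): a simplified standalone view of the
# shift_tokens_right behaviour the tests above rely on; pad_token_id=1 and
# decoder_start_token_id=2 follow the test constants, the ids are illustrative.
_ids = np.array([[71, 82, 18, 2]])
_shifted = np.full_like(_ids, fill_value=1)  # fill with pad_token_id = 1
_shifted[:, 1:] = _ids[:, :-1]               # shift right by one position
_shifted[:, 0] = 2                           # prepend decoder_start_token_id = 2
assert _shifted.tolist() == [[2, 71, 82, 18]]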
| 505 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , vocab_file , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. include the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self ):
        return len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD' , outputs )
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize( self , text : str ):
        text = self.preprocess_text(text )
        pieces = self.sp_model.encode(text , out_type=str )
        new_pieces = []
        for piece in pieces:
            if len(piece ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(cur_pieces )
            else:
                new_pieces.append(piece )
        return new_pieces
    def _convert_token_to_id( self , token ):
        return self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        return self.sp_model.IdToPiece(index )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a )) + [1] + ([0] * len(token_ids_b )) + [1]
        return [1] + ([0] * len(token_ids_a )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , 'wb' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
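# Added sketch (not in the original file): the sequence-pair layout produced by
# build_inputs_with_special_tokens and create_token_type_ids_from_sequences
# above; 2/3 are illustrative stand-ins, not Albert's real special-token ids.
_cls, _sep = [2], [3]
_seq_a, _seq_b = [11, 12], [21, 22]
_pair = _cls + _seq_a + _sep + _seq_b + _sep
_type_ids = len(_cls + _seq_a + _sep) * [0] + len(_seq_b + _sep) * [1]
assert _type_ids == [0, 0, 0, 0, 1, 1, 1]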
| 505 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict['''labels'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict['''input_ids'''] = inputs_dict['''labels''']
                inputs_dict['''token_type_ids'''] = inputs_dict['''labels''']
                inputs_dict['''mc_token_ids'''] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict['''mc_labels'''] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_openai_gpt_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
    def test_openai_gpt_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
    def test_openai_gpt_double_lm_head_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
    def test_openai_gpt_classification_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class OpenAIGPTModelLanguageGenerationTest( unittest.TestCase ):
@slow
    def test_lm_generate_openai_gpt( self ):
        '''simple docstring'''
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=torch_device ) # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
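# Added sketch (not in the original file): the greedy-generation check above in
# miniature; kept as comments since running it needs the full torch stack plus
# a model download. Prompt ids follow the test ("the president is").
# model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
# input_ids = torch.tensor([[481, 4_735, 544]])
# output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding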
| 75 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin( FeatureExtractionSavingTestMixin ):
    '''simple docstring'''
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict( self ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties( self ):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(feat_extract , '''feature_size''' ) )
        self.assertTrue(hasattr(feat_extract , '''sampling_rate''' ) )
        self.assertTrue(hasattr(feat_extract , '''padding_value''' ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , processed_features[input_name] ) ) )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase__ )
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase__ )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase__ )
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
UpperCamelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCamelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCAmelCase ( self , lowerCamelCase__=False ):
'''simple docstring'''
def _inputs_have_equal_length(lowerCamelCase__ ):
UpperCamelCase = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase__ ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase__ , lowerCamelCase__ ):
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
if not np.allclose(np.asarray(lowerCamelCase__ ) , np.asarray(lowerCamelCase__ ) , atol=1e-3 ):
return False
return True
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase__ )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = self.feat_extract_tester.seq_length_diff
UpperCamelCase = self.feat_extract_tester.max_seq_length + pad_diff
UpperCamelCase = self.feat_extract_tester.min_seq_length
UpperCamelCase = self.feat_extract_tester.batch_size
UpperCamelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding=lowerCamelCase__ )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''np''' )
UpperCamelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase__ ):
feat_extract.pad(lowerCamelCase__ , padding='''max_length''' )[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , pad_to_multiple_of=1_0 )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , pad_to_multiple_of=1_0 )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , pad_to_multiple_of=1_0 , max_length=lowerCamelCase__ )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , pad_to_multiple_of=1_0 , max_length=lowerCamelCase__ , return_tensors='''np''' , )
UpperCamelCase = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase__ ) % 1_0 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0
self.assertTrue(all(len(lowerCamelCase__ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCamelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def UpperCAmelCase ( self , lowerCamelCase__=False ):
'''simple docstring'''
def _inputs_have_equal_length(lowerCamelCase__ ):
UpperCamelCase = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase__ ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase__ , lowerCamelCase__ ):
if len(lowerCamelCase__ ) != len(lowerCamelCase__ ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
if not np.allclose(np.asarray(lowerCamelCase__ ) , np.asarray(lowerCamelCase__ ) , atol=1e-3 ):
return False
return True
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase__ )
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase__ )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
UpperCamelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase__ ) )
# truncate to smallest with np
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=lowerCamelCase__ , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
UpperCamelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase__ ) )
# truncate to middle
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase__ , return_tensors='''np''' , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase__ )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
UpperCamelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase__ , lowerCamelCase__ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase__ ):
feat_extract.pad(lowerCamelCase__ , truncation=lowerCamelCase__ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase__ ):
feat_extract.pad(lowerCamelCase__ , padding='''longest''' , truncation=lowerCamelCase__ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase__ ):
feat_extract.pad(lowerCamelCase__ , padding='''longest''' , truncation=lowerCamelCase__ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase__ ):
feat_extract.pad(lowerCamelCase__ , padding='''max_length''' , truncation=lowerCamelCase__ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCamelCase = 1_2
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase__ , truncation=lowerCamelCase__ , )
UpperCamelCase = input_a[input_name]
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase__ , )
UpperCamelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCamelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCamelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase__ ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase__ ) )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._check_padding(numpify=lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._check_padding(numpify=lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._check_truncation(numpify=lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
self._check_truncation(numpify=lowerCamelCase__ )
@require_torch
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''np''' )[input_name]
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''tf''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**lowerCamelCase__ )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = [len(lowerCamelCase__ ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = feat_extract.pad(lowerCamelCase__ , padding='''longest''' , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowerCamelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.feat_extract_dict
UpperCamelCase = True
UpperCamelCase = self.feature_extraction_class(**lowerCamelCase__ )
UpperCamelCase = self.feat_extract_tester.prepare_inputs_for_common()
UpperCamelCase = [len(lowerCamelCase__ ) for x in speech_inputs]
UpperCamelCase = feat_extract.model_input_names[0]
UpperCamelCase = BatchFeature({input_name: speech_inputs} )
UpperCamelCase = min(lowerCamelCase__ )
UpperCamelCase = feat_extract.pad(
lowerCamelCase__ , padding='''max_length''' , max_length=lowerCamelCase__ , truncation=lowerCamelCase__ , return_tensors='''np''' )
self.assertIn('''attention_mask''' , lowerCamelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
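# Added sketch (not in the original file): the property the attention-mask
# tests above assert, standalone. After padding to the longest input, each
# mask row sums to that row's original length; the lengths are illustrative.
_lengths = [3, 5, 2]
_max_len = max(_lengths)
_mask = np.array([[1] * n + [0] * (_max_len - n) for n in _lengths])
assert _mask.sum(-1).tolist() == _lengths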
| 212 | 0 |
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + """/user"""
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("""USER_TOKEN""", """""")
def fetch_github_info( auth_token : str )-> dict[Any, Any]:
    headers = {
        'Authorization': f'token {auth_token}',
        'Accept': 'application/vnd.github.v3+json',
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
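# Added usage sketch (not in the original file): calling the helper with a
# placeholder token; kept as comments since a real call needs a valid GitHub
# personal access token and network access.
# info = fetch_github_info("ghp_xxxxxxxxxxxxxxxx")
# print(info.get("login"))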
| 222 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
A_ : List[str] =logging.get_logger(__name__)
A_ : Optional[int] ={
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig( PretrainedConfig ):
    model_type = "perceiver"
    def __init__( self , num_latents=2_56 , d_latents=12_80 , d_model=7_68 , num_blocks=1 , num_self_attends_per_block=26 , num_self_attention_heads=8 , num_cross_attention_heads=8 , qk_channels=None , v_channels=None , cross_attention_shape_for_attention="kv" , self_attention_widening_factor=1 , cross_attention_widening_factor=1 , hidden_act="gelu" , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , layer_norm_eps=1e-12 , use_query_residual=True , vocab_size=2_62 , max_position_embeddings=20_48 , image_size=56 , train_size=[3_68, 4_96] , num_frames=16 , audio_samples_per_frame=19_20 , samples_per_patch=16 , output_shape=[1, 16, 2_24, 2_24] , **kwargs , ):
        super().__init__(**kwargs )
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('inputs', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
    @property
    def atol_for_validation( self ):
        return 1e-4
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , num_choices = -1 , is_pair = False , framework = None , num_channels = 3 , image_width = 40 , image_height = 40 , ):
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor , PreTrainedTokenizerBase ):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair )
            seq_length = compute_effective_axis_dimension(
                seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [' '.join(['a'] ) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('input_ids' )
            return inputs
        elif isinstance(preprocessor , FeatureExtractionMixin ) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size , fixed_dimension=OnnxConfig.default_fixed_batch )
            dummy_input = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
            inputs = dict(preprocessor(images=dummy_input , return_tensors=framework ) )
            inputs['inputs'] = inputs.pop('pixel_values' )
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
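# Hedged usage sketch: driving the ONNX config above with the matching slow
# tokenizer. `from_model_config` and the checkpoint name follow the usual
# transformers conventions and are assumptions here, not taken from this file.
def _onnx_dummy_inputs_sketch():
    from transformers import PerceiverTokenizer
    config = PerceiverConfig()
    onnx_config = PerceiverOnnxConfig.from_model_config(config )
    tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
    dummy = onnx_config.generate_dummy_inputs(tokenizer , framework=TensorType.PYTORCH )
    return sorted(dummy.keys() )  # expected: ['attention_mask', 'inputs']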
| 222 | 1 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb( x ):
    return int(x / 2**20 )
class TorchTracemalloc:
    """simple docstring"""
    def __enter__( self ):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__( self , *exc ):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
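# Hedged usage sketch for the context manager above (requires a CUDA device);
# the tensor workload and function name are illustrative only.
def _tracemalloc_sketch():
    with TorchTracemalloc() as tracemalloc:
        _ = torch.ones((1024, 1024) , device="cuda" )
    return tracemalloc.used, tracemalloc.peaked  # deltas in MB via bamb()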
def get_dataloaders( accelerator , batch_size = 16 , model_name = "bert-base-cased" , n_train = 320 , n_val = 160 , ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset(
        '''glue''' , '''mrpc''' , split={'''train''': F'train[:{n_train}]', '''validation''': F'validation[:{n_val}]'} )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='''max_length''' , max_length=128 , return_tensors='''pt''' )
        return tokenizer.pad(examples , padding='''longest''' , return_tensors='''pt''' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
def training_function( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    model_name = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name , args.n_train , args.n_val )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch , num_epochs ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader ):
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , '''peak_memory_utilization.json''' ) , '''w''' ) as f:
            json.dump(train_total_peak_memory , f )
def main():
    parser = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=False , )
    parser.add_argument(
        '''--output_dir''' , type=str , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--peak_memory_upper_bound''' , type=float , default=None , help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' , )
    parser.add_argument(
        '''--n_train''' , type=int , default=320 , help='''Number of training examples to use.''' , )
    parser.add_argument(
        '''--n_val''' , type=int , default=160 , help='''Number of validation examples to use.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=int , default=1 , help='''Number of train epochs.''' , )
    args = parser.parse_args()
    config = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 688 |
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ):
    return x + 2
class A( unittest.TestCase ):
"""simple docstring"""
    def test_evaluate_assign( self ):
        """simple docstring"""
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )
        code = '''x = y'''
        state = {'''y''': 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 5, '''y''': 5} )
    def test_evaluate_call( self ):
        """simple docstring"""
        code = '''y = add_two(x)'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant( self ):
        """simple docstring"""
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )
    def test_evaluate_dict( self ):
        """simple docstring"""
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertDictEqual(result , {'''x''': 3, '''y''': 5} )
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
    def test_evaluate_expression( self ):
        """simple docstring"""
        code = '''x = 3\ny = 5'''
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )
    def test_evaluate_f_string( self ):
        """simple docstring"""
        code = '''text = f\'This is x: {x}.\''''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {'''x''': 3, '''text''': '''This is x: 3.'''} )
    def test_evaluate_if( self ):
        """simple docstring"""
        code = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {'''x''': 3, '''y''': 2} )
        state = {'''x''': 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 8, '''y''': 5} )
    def test_evaluate_list( self ):
        """simple docstring"""
        code = '''test_list = [x, add_two(x)]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )
    def test_evaluate_name( self ):
        """simple docstring"""
        code = '''y = x'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3, '''y''': 3} )
    def test_evaluate_subscript( self ):
        """simple docstring"""
        code = '''test_list = [x, add_two(x)]\ntest_list[1]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
    def test_evaluate_for( self ):
        """simple docstring"""
        code = '''x = 0\nfor i in range(3):\n x = i'''
        state = {}
        result = evaluate(code , {'''range''': range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {'''x''': 2, '''i''': 2} )
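# Hedged standalone sketch of the interpreter contract exercised above: the
# value of the last assignment is returned and `state` is mutated in place.
def _interpreter_demo():
    state = {'''x''': 3}
    result = evaluate('''y = add_two(x)''' , {'''add_two''': add_two} , state=state )
    return result, state  # expected: (5, {'x': 3, 'y': 5})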
| 355 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
_A = _LazyModule(__name__, globals()["__file__"], _import_structure) | 228 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda(number ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg )
    if number < 1:
        raise ValueError("Input must be a positive integer" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
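# Worked example (assuming `prime_factors` counts factors with multiplicity):
# 10 = 2 * 5 has an even count, 12 = 2 * 2 * 3 an odd one, matching the classic
# Liouville lambda values.
assert liouville_lambda(10 ) == 1
assert liouville_lambda(12 ) == -1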
if __name__ == "__main__":
import doctest
doctest.testmod() | 228 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
__UpperCamelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
__UpperCamelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
    def _info( self ) -> Union[str, Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ) -> Tuple:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ) -> int:
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
| 26 |
'''simple docstring'''
def matching_min_vertex_cover( graph : dict ):
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges( graph : dict ):
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
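# Hedged worked example on the sample graph from the block below: every edge is
# covered by the returned vertex set (set-iteration order makes the exact cover
# run-dependent).
def _vertex_cover_demo():
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(graph )
    assert all(u in cover or v in cover for u, v in get_edges(graph ) )
    return cover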
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 365 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class PlotArguments:
    """simple docstring"""
    csv_file: str = field(
        metadata={"help": "The csv file to plot."} , )
    plot_along_batch: bool = field(
        default=False , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
    is_time: bool = field(
        default=False , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
    no_log_scale: bool = field(
        default=False , metadata={"help": "Disable logarithmic scale when plotting"} , )
    is_train: bool = field(
        default=False , metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        } , )
    figure_png_file: Optional[str] = field(
        default=None , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
    short_model_names: Optional[List[str]] = list_field(
        default=None , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def can_convert_to_int( value ):
    try:
        int(value )
        return True
    except ValueError:
        return False
def can_convert_to_float( value ):
    try:
        float(value )
        return True
    except ValueError:
        return False
class Plot:
    """simple docstring"""
    def __init__( self , args ):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
        with open(self.args.csv_file , newline="" ) as csv_file:
            reader = csv.DictReader(csv_file )
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
                if can_convert_to_int(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = int(row["result"] )
                elif can_convert_to_float(row["result"] ):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"] ), int(row["sequence_length"] ))
                    ] = float(row["result"] )
    def plot( self ):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log" )
            ax.set_yscale("log" )
        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter() )
        for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"] ) )
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"] ) )
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=int , )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.float32 , )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_values = np.asarray(x_axis_array , int )[: len(y_axis_array )]
                plt.scatter(
                    x_axis_values , y_axis_array , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
                plt.plot(x_axis_values , y_axis_array , "--" )
                title_str += f''' {label_model_name} vs.'''
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str )
        plt.xlabel(x_axis_label )
        plt.ylabel(y_axis_label )
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file )
        else:
            plt.show()
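# Hedged sketch of the CSV layout the reader above expects; the header row
# mirrors the columns accessed in __init__, the values are illustrative.
def _write_example_csv( path ):
    with open(path , "w" , newline="" ) as f:
        writer = csv.writer(f )
        writer.writerow(["model", "batch_size", "sequence_length", "result"] )
        writer.writerow(["bert-base-uncased", 8, 128, 0.0123] )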
def main():
    parser = HfArgumentParser(PlotArguments )
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args )
    plot.plot()
if __name__ == "__main__":
main()
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data : dict ):
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main():
    iris = load_iris()
    features, targets = data_handling(iris )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
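# Hedged companion sketch: the same fitted classifier can report plain accuracy
# on the held-out split (function and variable names are illustrative).
def _accuracy_sketch( classifier , x_test , y_test ):
    from sklearn.metrics import accuracy_score
    return accuracy_score(y_test , classifier.predict(x_test ) )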
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 1 |
from math import isqrt
def is_prime(number: int ) -> bool:
    """simple docstring"""
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ))
def solution(max_prime: int = 10**6 ) -> int:
    """simple docstring"""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
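# Hedged check of the candidate sequence: these are differences of consecutive
# cubes, (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, which the 6 * cube_index step walks.
def _cube_difference(n: int ) -> int:
    return (n + 1) ** 3 - n**3
assert [_cube_difference(n ) for n in range(1 , 4 )] == [7, 19, 37]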
if __name__ == "__main__":
print(f'{solution() = }')
| 136 |
import unittest
from transformers.testing_utils import require_bsa
from transformers.utils import is_bsa_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bsa_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester( unittest.TestCase ):
    def __init__( self , parent ) -> Dict:
        '''simple docstring'''
        self.parent = parent
    def prepare_feat_extract_dict( self ) -> Union[str, Any]:
        '''simple docstring'''
        return {}
def get_html_strings() -> Dict:
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bsa
class MarkupLMFeatureExtractionTest( FeatureExtractionSavingTestMixin , unittest.TestCase ):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bsa_available() else None
    def setUp( self ) -> Any:
        '''simple docstring'''
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self )
    @property
    def feat_extract_dict( self ) -> Optional[Any]:
        '''simple docstring'''
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call( self ) -> List[str]:
        '''simple docstring'''
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string )
# fmt: off
        expected_nodes = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']]
        expected_xpaths = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']]
# fmt: on
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths )
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings )
        # fmt: off
        expected_nodes = expected_nodes + [['''My First Heading''', '''My first paragraph.''']]
        expected_xpaths = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']]
        self.assertEqual(len(encoding.nodes ) , 2 )
        self.assertEqual(len(encoding.xpaths ) , 2 )
        self.assertEqual(encoding.nodes , expected_nodes )
        self.assertEqual(encoding.xpaths , expected_xpaths ) | 219 | 0 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ):
    """simple docstring"""
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , "rb" ) as f:
        model_weights = pickle.load(f )["""weights"""]
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 712 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ) -> Any:
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model
    def test_inference( self ) -> Optional[Any]:
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = pndm(generator=generator , num_inference_steps=20 , output_type="numpy" , return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_cifar10( self ) -> Optional[int]:
        '''simple docstring'''
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet , scheduler=scheduler )
        pndm.to(torch_device )
        pndm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = pndm(generator=generator , output_type="numpy" ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 117 | 0 |
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"} )
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) -> List[str]:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        controlnet = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> Any:
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , )
        image = floats_tensor(control_image.shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass( self ) -> Union[str, Any]:
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> List[str]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ) -> Tuple:
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin , PipelineKarrasSchedulerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components( self ) -> Optional[int]:
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        torch.manual_seed(0 )
        def init_weights(m ):
            if isinstance(m , torch.nn.Conv2d ):
                torch.nn.init.normal_(m.weight )
                m.bias.data.fill_(1.0 )
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet1.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
        controlnet2.controlnet_down_blocks.apply(init_weights )
        torch.manual_seed(0 )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        controlnet = MultiControlNetModel([controlnet1, controlnet2] )
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ) -> Dict:
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=generator , device=torch.device(device ) , ),
        ]
        image = floats_tensor(control_image[0].shape , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" ).resize((64, 64) )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch( self ) -> str:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        scale = 10.0
        steps = 4
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
        inputs = self.get_dummy_inputs(torch_device )
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2 ) ) > 1e-3
        assert np.sum(np.abs(output_2 - output_3 ) ) > 1e-3
        assert np.sum(np.abs(output_3 - output_4 ) ) > 1e-3
    def test_attention_slicing_forward_pass( self ) -> Optional[Any]:
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ) -> List[str]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
    def test_inference_batch_single_identical( self ) -> Dict:
        self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
    def test_save_pretrained_raise_not_implemented_exception( self ) -> Union[str, Any]:
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir )
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) -> Dict:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny( self ) -> int:
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , safety_checker=None , controlnet=controlnet )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512) )
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512) )
        output = pipe(
            prompt , image , control_image=control_image , generator=generator , output_type="np" , num_inference_steps=50 , strength=0.6 , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
        assert np.abs(expected_image - image ).max() < 9e-2
| 159 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a: int = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Dict = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a: Tuple = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__a: Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 108 | 0 |
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[digit] for digit in str(n))


def solution() -> int:
    """Sum of all numbers that are equal to the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # upper bound: beyond this, 9!-digits can't keep up
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
if __name__ == "__main__":
print(F"""{solution() = }""")
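Quick sanity check (hedged: relies on the known fact that 145 and 40585 are the only numbers above 2 equal to the sum of the factorials of their digits):

assert sum_of_digit_factorial(145) == 145   # 1! + 4! + 5! = 1 + 24 + 120
assert solution() == 145 + 40585            # the only two such numbers above 2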
| 712 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Build a GHZ (fully entangled) state on ``qubits`` qubits and return measurement counts."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate, chaining each qubit to the previous one
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
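Illustrative output (hedged: exact counts fluctuate run to run around a 500/500 split):

# quantum_entanglement(3)  ->  {'000': 491, '111': 509}
# Only the all-zero and all-one bitstrings occur, since measuring one qubit
# of a GHZ state collapses the remaining qubits to the same value.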
| 685 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
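A short usage sketch (hedged: the checkpoint name is one of the archive-map entries above; `VisualBertModel` is the matching transformers model class):

from transformers import VisualBertConfig, VisualBertModel

config = VisualBertConfig(visual_embedding_dim=1024)   # override one default
model = VisualBertModel(config)                        # randomly initialized
pretrained = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa")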
| 26 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the redundant usage prefix for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
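A quick non-interactive check of the converters above (hedged: enum members as defined in accelerate's dataclasses):

assert _convert_yes_no_to_bool("Yes") is True
assert _convert_mixed_precision("1") == PrecisionType.FP16  # ["no", "fp16", "bf16", "fp8"][1]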
| 131 | 0 |
def solution() -> int:
    """Product a*b*c of the Pythagorean triplet (a < b < c) with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
if __name__ == "__main__":
print(f"""{solution() = }""")
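Verification (hedged): the unique triplet is (200, 375, 425), since 200 + 375 + 425 = 1000 and 200**2 + 375**2 = 425**2, so the product is 31875000.

assert solution() == 31_875_000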
| 711 |
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
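A few spot checks derived from the exponent table above (hedged, but pure arithmetic):

assert length_conversion(4, "meter", "kilometer") == 0.004
assert length_conversion(1, "gigametre", "meter") == 1e9
assert length_conversion(3, "km", "m") == 3000.0  # symbols are accepted too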
| 345 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """Holds one conversation: past user inputs, generated responses, and the
    not-yet-processed new user input."""

    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations: Union[Conversation, List[Conversation]], num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
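A hedged usage sketch of the pipeline above (the model name is illustrative; any conversational checkpoint works):

from transformers import pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])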
| 562 |
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
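The rounding performed in `resize` in one line (hedged arithmetic check): with size_divisor=32, a 480x640 image keeps its size, while a 500x650 image is shrunk to the nearest smaller multiples.

assert (500 // 32) * 32 == 480 and (650 // 32) * 32 == 640  # 500x650 -> 480x640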
| 562 | 1 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 715 |
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
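A hedged standalone sketch of the upscaler on CPU, mirroring the fast tests above (the blank placeholder image is illustrative; any 128x128 RGB image works):

from PIL import Image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
low_res = Image.new("RGB", (128, 128))  # placeholder 128x128 input
result = pipe(prompt="a white cat", image=low_res, num_inference_steps=10, output_type="np")
print(result.images[0].shape)  # (512, 512, 3) after 4x upscaling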
| 116 | 0 |
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A_ :
pass
| 130 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
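A hedged usage sketch of the two classes above:

config = XLMRobertaXLConfig(num_hidden_layers=4, hidden_size=256)  # small variant for experiments
onnx_config = XLMRobertaXLOnnxConfig(config)
print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']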
| 469 | 0 |
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` (0-indexed)."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count)) | 719 |
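A quick usage check of quick_select (0-indexed k-th smallest element):

items = [7, 2, 9, 4, 1]
assert quick_select(items, 0) == 1      # smallest
assert quick_select(items, 2) == 4      # median of five elements
assert quick_select(items, 10) is None  # out-of-range index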
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # weight-0 edges go to the front of the deque, weight-1 to the back
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod() | 484 | 0 |
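A small worked example of the 0-1 BFS above:

g = AdjacencyList(3)
g.add_edge(0, 1, 0)  # free edge
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 2) == 1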
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the roberta-prelayernorm weights into the transformers structure."""
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
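A hedged example invocation (the script filename is illustrative; the repo ID is the default suggested in the help text above):

#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted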
| 541 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
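A hedged example invocation (filename illustrative; the defaults mirror the argparse setup above):

#   python token_counts.py \
#       --data_file data/dump.bert-base-uncased.pickle \
#       --token_counts_dump data/token_counts.bert-base-uncased.pickle \
#       --vocab_size 30522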
| 345 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
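A hedged usage sketch of the stateless Flax scheduler API above:

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
# each denoising step t then reads sigma = state.schedule[t] and, e.g.:
# sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)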
| 709 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1

    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs

    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")
        return super().__call__(sequences, **kwargs)

    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
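A hedged usage sketch of the pipeline above (model name illustrative; any NLI checkpoint works):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "I just bought a new laptop.",
    candidate_labels=["technology", "cooking", "sports"],
)
print(result["labels"][0])  # most likely: "technology"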
| 672 | 0 |
"""simple docstring"""
def __a ( a ):
"""simple docstring"""
_a = len(a )
_a = sum(a )
_a = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1, n + 1 ):
_a = True
for i in range(1, s + 1 ):
_a = False
for i in range(1, n + 1 ):
for j in range(1, s + 1 ):
_a = dp[i][j - 1]
if arr[i - 1] <= j:
_a = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ), -1, -1 ):
if dp[n][j] is True:
_a = s - 2 * j
break
return diff
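Two quick usage checks of the minimum-partition routine:

assert find_min([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11} -> |12 - 11| = 1
assert find_min([3]) == 3            # a single element cannot be split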
| 388 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 388 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 89 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __SCREAMING_SNAKE_CASE["modeling_xlm_roberta_xl"] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], __SCREAMING_SNAKE_CASE)
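# A minimal, self-contained sketch (an illustrative addition; transformers'
# _LazyModule is more elaborate) of the lazy-import idea wired up above:
# attribute access on the module resolves to a real submodule import only on
# first touch, so importing the package stays cheap until a torch-backed class
# is actually used.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    """Resolve attributes to submodule imports on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)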
| 89 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
_lowerCAmelCase = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = VOCAB_FILES_NAMES
UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase = ["input_ids", "attention_mask"]
UpperCAmelCase = []
UpperCAmelCase = []
def __init__( self : int , _A : Union[str, Any] , _A : Optional[int]="<s>" , _A : str="</s>" , _A : Optional[int]="</s>" , _A : Tuple="<s>" , _A : Dict="<unk>" , _A : List[str]="<pad>" , _A : Dict="<mask>" , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Any=None , _A : Optional[Dict[str, Any]] = None , _A : List[str]=None , _A : str=False , **_A : int , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCamelCase = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
_UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCamelCase = legacy_behaviour
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , tokenizer_file=_A , src_lang=_A , tgt_lang=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_A , **_A , )
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
_UpperCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCamelCase = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCamelCase = 1
_UpperCamelCase = len(self.sp_model )
_UpperCamelCase = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
}
_UpperCamelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCamelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_UpperCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCamelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_UpperCamelCase = src_lang if src_lang is not None else '''eng_Latn'''
_UpperCamelCase = self.lang_code_to_id[self._src_lang]
_UpperCamelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Union[str, Any] ):
_UpperCamelCase = self.__dict__.copy()
_UpperCamelCase = None
_UpperCamelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : str , _A : Any ):
_UpperCamelCase = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_UpperCamelCase = {}
_UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def UpperCamelCase_ ( self : Optional[int] ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase_ ( self : int ):
return self._src_lang
@src_lang.setter
def UpperCamelCase_ ( self : int , _A : str ):
_UpperCamelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCamelCase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
_UpperCamelCase = [1] * len(self.prefix_tokens )
_UpperCamelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def UpperCamelCase_ ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ):
_UpperCamelCase = [self.sep_token_id]
_UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase_ ( self : Optional[int] , _A : str , _A : str , _A : Optional[str] , _A : Optional[str] , **_A : Optional[int] ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_UpperCamelCase = src_lang
_UpperCamelCase = self(_A , add_special_tokens=_A , return_tensors=_A , **_A )
_UpperCamelCase = self.convert_tokens_to_ids(_A )
_UpperCamelCase = tgt_lang_id
return inputs
def UpperCamelCase_ ( self : Union[str, Any] ):
        _UpperCamelCase = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase_ ( self : int , _A : str ):
return self.sp_model.encode(_A , out_type=_A )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCamelCase = self.sp_model.PieceToId(_A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self : Union[str, Any] , _A : str ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase_ ( self : List[Any] , _A : Dict ):
        _UpperCamelCase = ''''''.join(_A ).replace(SPIECE_UNDERLINE , ''' ''' ).strip()
return out_string
def UpperCamelCase_ ( self : Union[str, Any] , _A : str , _A : Optional[str] = None ):
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCamelCase = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
_UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[str] , _A : str = "eng_Latn" , _A : Optional[List[str]] = None , _A : str = "fra_Latn" , **_A : Optional[int] , ):
_UpperCamelCase = src_lang
_UpperCamelCase = tgt_lang
return super().prepare_seqaseq_batch(_A , _A , **_A )
def UpperCamelCase_ ( self : Union[str, Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase_ ( self : Optional[int] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase_ ( self : Optional[int] , _A : Tuple ):
_UpperCamelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
def UpperCamelCase_ ( self : Optional[Any] , _A : str ):
_UpperCamelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_UpperCamelCase = []
_UpperCamelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCamelCase = [self.cur_lang_code]
_UpperCamelCase = [self.eos_token_id]
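# A minimal sketch (an illustrative addition, not part of the original file)
# of the fairseq/SentencePiece id offset handled by the token<->id methods
# above: fairseq reserves ids 0-3 for <s>/<pad>/</s>/<unk>, so every
# SentencePiece id is shifted by an offset of 1, and SPM's 0 ("unknown piece")
# maps to fairseq's <unk> id.
def _spm_to_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    return spm_id + fairseq_offset if spm_id else unk_token_id


assert _spm_to_fairseq_id(0) == 3   # unknown piece falls back to <unk>
assert _spm_to_fairseq_id(41) == 42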
| 10 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase__ ( _snake_case ):
'''simple docstring'''
A_ : List[str] = """lilt"""
def __init__( self , __snake_case=3_0522 , __snake_case=768 , __snake_case=12 , __snake_case=12 , __snake_case=3072 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=512 , __snake_case=2 , __snake_case=0.02 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=None , __snake_case=4 , __snake_case=1024 , **__snake_case , ):
super().__init__(pad_token_id=__snake_case , **__snake_case )
_SCREAMING_SNAKE_CASE : str = vocab_size
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
_SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
_SCREAMING_SNAKE_CASE : Dict = intermediate_size
_SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : Union[str, Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Dict = type_vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
_SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
_SCREAMING_SNAKE_CASE : str = position_embedding_type
_SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
_SCREAMING_SNAKE_CASE : int = channel_shrink_ratio
        _SCREAMING_SNAKE_CASE : str = max_2d_position_embeddings
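# Hedged usage sketch (an illustrative addition; the canonical transformers
# class names below are assumptions relative to the obfuscated local names in
# this file):
#
# from transformers import LiltConfig, LiltModel
# config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
# model = LiltModel(config)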
| 533 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Optional[int] = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = "roc_bert"
def __init__(self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1E-12 , A=True , A=0 , A="absolute" , A=None , A=True , A=True , A=7_6_8 , A=9_1_0 , A=5_1_2 , A=2_4_8_5_8 , A=True , **A , ):
lowerCamelCase_ : Tuple = vocab_size
lowerCamelCase_ : int = max_position_embeddings
lowerCamelCase_ : List[Any] = hidden_size
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : Dict = num_attention_heads
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : List[str] = hidden_act
lowerCamelCase_ : Union[str, Any] = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : Any = type_vocab_size
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Optional[Any] = use_cache
lowerCamelCase_ : Tuple = enable_pronunciation
lowerCamelCase_ : Tuple = enable_shape
lowerCamelCase_ : List[Any] = pronunciation_embed_dim
lowerCamelCase_ : List[Any] = pronunciation_vocab_size
lowerCamelCase_ : List[str] = shape_embed_dim
lowerCamelCase_ : List[Any] = shape_vocab_size
lowerCamelCase_ : str = concat_input
lowerCamelCase_ : Optional[Any] = position_embedding_type
lowerCamelCase_ : List[str] = classifier_dropout
super().__init__(pad_token_id=A , **A )
| 721 |
'''simple docstring'''
import itertools
import math
def is_prime(number: int ) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1


def solution(nth: int = 10_001 ) -> int:
    '''simple docstring'''
    return next(itertools.islice(prime_generator() , nth - 1 , nth ) )
if __name__ == "__main__":
print(f'{solution() = }')
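    # Hedged sanity check (an illustrative addition): 2, 3, 5, 7, 11, 13 are
    # the first six primes, so the 6th prime returned by solution is 13.
    assert solution(6) == 13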
| 357 | 0 |
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : Optional[int] = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
if self.graph.get(_lowerCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_lowercase : Tuple = [[w, v]]
if not self.graph.get(_lowerCAmelCase ):
_lowercase : Optional[Any] = []
def __a ( self ):
return list(self.graph )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
if s == d:
return []
_lowercase : str = []
_lowercase : Union[str, Any] = []
if s == -2:
_lowercase : str = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Optional[int] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCAmelCase ) != 0:
_lowercase : int = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Optional[int] = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return visited
def __a ( self , _lowerCAmelCase=-1 ):
if c == -1:
_lowercase : Dict = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowercase : str = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCAmelCase , _lowerCAmelCase , 1 )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : str = deque()
_lowercase : Optional[Any] = []
if s == -2:
_lowercase : List[Any] = list(self.graph )[0]
d.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
while d:
_lowercase : Tuple = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _lowerCAmelCase ):
_lowercase : str = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __a ( self , _lowerCAmelCase ):
return len(self.graph[u] )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Dict = []
_lowercase : Tuple = []
if s == -2:
_lowercase : Union[str, Any] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : List[Any] = s
_lowercase : Dict = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : str = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_lowerCAmelCase ) != 0:
_lowercase : str = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : int = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return sorted_nodes
def __a ( self ):
_lowercase : Tuple = []
_lowercase : Tuple = []
_lowercase : Dict = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = -2
_lowercase : Tuple = []
_lowercase : Dict = s
_lowercase : List[str] = False
_lowercase : List[Any] = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : Union[str, Any] = len(_lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : int = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Tuple = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Dict = s
_lowercase : Union[str, Any] = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return list(_lowerCAmelCase )
def __a ( self ):
_lowercase : Optional[Any] = []
_lowercase : Optional[Any] = []
_lowercase : List[Any] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Dict = -2
_lowercase : Union[str, Any] = []
_lowercase : int = s
_lowercase : List[str] = False
_lowercase : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : str = len(_lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : Dict = True
if len(_lowerCAmelCase ) != 0:
_lowercase : Union[str, Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Any = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Optional[Any] = s
_lowercase : Any = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return False
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
_lowercase : Optional[int] = time()
self.dfs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = time()
return end - begin
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : str = time()
self.bfs(_lowerCAmelCase )
_lowercase : str = time()
return end - begin
class lowerCAmelCase_ :
def __init__( self ):
_lowercase : str = {}
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=1 ):
# check if the u exists
if self.graph.get(_lowerCAmelCase ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
_lowercase : Any = [[w, v]]
# add the other way
if self.graph.get(_lowerCAmelCase ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
_lowercase : List[Any] = [[w, u]]
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_lowerCAmelCase )
# the other way round
if self.graph.get(_lowerCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_lowerCAmelCase )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
if s == d:
return []
_lowercase : Dict = []
_lowercase : Dict = []
if s == -2:
_lowercase : Tuple = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : str = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Any = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_lowerCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Union[str, Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_lowerCAmelCase ) != 0:
_lowercase : Any = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Tuple = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return visited
def __a ( self , _lowerCAmelCase=-1 ):
if c == -1:
_lowercase : Dict = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_lowerCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_lowercase : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_lowerCAmelCase , _lowerCAmelCase , 1 )
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Optional[int] = deque()
_lowercase : Any = []
if s == -2:
_lowercase : Optional[int] = list(self.graph )[0]
d.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
while d:
_lowercase : Any = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __a ( self , _lowerCAmelCase ):
return len(self.graph[u] )
def __a ( self ):
_lowercase : Any = []
_lowercase : Optional[Any] = []
_lowercase : Optional[int] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Optional[int] = -2
_lowercase : Dict = []
_lowercase : int = s
_lowercase : Dict = False
_lowercase : Dict = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Any = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : str = len(_lowerCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : Any = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[Any] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Union[str, Any] = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : int = s
_lowercase : Any = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return list(_lowerCAmelCase )
def __a ( self ):
_lowercase : Union[str, Any] = []
_lowercase : Dict = []
_lowercase : List[str] = list(self.graph )[0]
stack.append(_lowerCAmelCase )
visited.append(_lowerCAmelCase )
_lowercase : Any = -2
_lowercase : Optional[Any] = []
_lowercase : List[str] = s
_lowercase : Optional[int] = False
_lowercase : Tuple = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
_lowercase : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_lowercase : Tuple = len(_lowerCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_lowercase : Dict = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_lowercase : str = True
if len(_lowerCAmelCase ) != 0:
_lowercase : List[str] = stack[len(_lowerCAmelCase ) - 1]
else:
_lowercase : Optional[Any] = False
indirect_parents.append(_lowerCAmelCase )
_lowercase : Optional[int] = s
_lowercase : List[str] = ss
            # check if we have reached the starting point
if len(_lowerCAmelCase ) == 0:
return False
def __a ( self ):
return list(self.graph )
def __a ( self , _lowerCAmelCase=-2 , _lowerCAmelCase=-1 ):
_lowercase : Any = time()
self.dfs(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Optional[int] = time()
return end - begin
def __a ( self , _lowerCAmelCase=-2 ):
_lowercase : Dict = time()
self.bfs(_lowerCAmelCase )
_lowercase : int = time()
return end - begin
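# A minimal, self-contained sketch (an illustrative addition, independent of
# the classes above) of the same iterative DFS used by the dfs methods above:
# an explicit stack replaces recursion, a visited list preserves discovery
# order, and edges use the same [weight, vertex] adjacency format.
def _iterative_dfs(graph: dict, start, goal):
    stack, visited = [start], [start]
    while stack:
        node = stack[-1]
        for _weight, nxt in graph.get(node, []):
            if nxt not in visited:
                if nxt == goal:
                    return visited + [nxt]
                stack.append(nxt)
                visited.append(nxt)
                break
        else:  # every child already visited: backtrack
            stack.pop()
    return visited


assert _iterative_dfs({1: [[1, 2]], 2: [[1, 3]], 3: []}, 1, 3) == [1, 2, 3]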
| 66 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
snake_case_ : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowercase )
class lowercase__ ( lowercase ):
def __init__( self : Tuple ,**lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if self.framework != "pt":
raise ValueError(F'The {self.__class__} is only available in PyTorch.' )
# No specific FOR_XXX available yet
def __call__( self : Dict ,lowerCamelCase__ : Union[np.ndarray, bytes, str] ,**lowerCamelCase__ : Any ):
'''simple docstring'''
return super().__call__(lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : int ,**lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = {}
if "candidate_labels" in kwargs:
_UpperCamelCase : Any = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
_UpperCamelCase : Any = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : str=None ,lowerCamelCase__ : List[Any]="This is a sound of {}." ):
'''simple docstring'''
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
if audio.startswith('http://' ) or audio.startswith('https://' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
_UpperCamelCase : Optional[Any] = requests.get(lowerCamelCase__ ).content
else:
with open(lowerCamelCase__ ,'rb' ) as f:
_UpperCamelCase : List[str] = f.read()
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = ffmpeg_read(lowerCamelCase__ ,self.feature_extractor.sampling_rate )
if not isinstance(lowerCamelCase__ ,np.ndarray ):
raise ValueError('We expect a numpy ndarray as input' )
if len(audio.shape ) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
_UpperCamelCase : Dict = self.feature_extractor(
[audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors='pt' )
_UpperCamelCase : List[Any] = candidate_labels
        _UpperCamelCase : Tuple = [hypothesis_template.format(x ) for x in candidate_labels]
_UpperCamelCase : Dict = self.tokenizer(lowerCamelCase__ ,return_tensors=self.framework ,padding=lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = [text_inputs]
return inputs
def UpperCamelCase_ ( self : str ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
_UpperCamelCase : Any = model_inputs.pop('candidate_labels' )
_UpperCamelCase : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = text_inputs[0]
else:
# Batching case.
_UpperCamelCase : int = text_inputs[0][0]
_UpperCamelCase : List[str] = self.model(**lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = model_outputs.pop('candidate_labels' )
_UpperCamelCase : int = model_outputs['logits'][0]
if self.framework == "pt":
_UpperCamelCase : Tuple = logits.softmax(dim=0 )
_UpperCamelCase : str = probs.tolist()
else:
raise ValueError('`tf` framework not supported.' )
_UpperCamelCase : Union[str, Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase__ ,lowerCamelCase__ ) ,key=lambda x: -x[0] )
]
return result
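# Hedged usage sketch (an illustrative addition; the checkpoint name below is
# an assumption, not taken from this file, and running it needs network access
# plus ffmpeg for audio decoding):
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])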
| 195 | 0 |
from typing import Any
class A :
"""simple docstring"""
def __init__( self : Dict,lowercase_ : Any )-> Union[str, Any]:
'''simple docstring'''
A__ = data
A__ = None
def __repr__( self : Optional[int] )-> str:
'''simple docstring'''
return F'Node({self.data})'
class A :
"""simple docstring"""
def __init__( self : Union[str, Any] )-> Optional[int]:
'''simple docstring'''
A__ = None
def __iter__( self : Union[str, Any] )-> Any:
'''simple docstring'''
A__ = self.head
while node:
yield node.data
A__ = node.next
def __len__( self : List[str] )-> int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : Optional[Any] )-> str:
'''simple docstring'''
return "->".join([str(lowercase_ ) for item in self] )
def __getitem__( self : Dict,lowercase_ : int )-> Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any],lowercase_ : int,lowercase_ : Any )-> None:
'''simple docstring'''
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
A__ = self.head
for _ in range(lowercase_ ):
A__ = current.next
A__ = data
def snake_case__ ( self : Any,lowercase_ : Any )-> None:
'''simple docstring'''
self.insert_nth(len(self ),lowercase_ )
def snake_case__ ( self : str,lowercase_ : Any )-> None:
'''simple docstring'''
self.insert_nth(0,lowercase_ )
def snake_case__ ( self : List[Any],lowercase_ : int,lowercase_ : Any )-> None:
'''simple docstring'''
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
A__ = Node(lowercase_ )
if self.head is None:
A__ = new_node
elif index == 0:
A__ = self.head # link new_node to head
A__ = new_node
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = new_node
def snake_case__ ( self : Tuple )-> None: # print every node data
'''simple docstring'''
print(self )
def snake_case__ ( self : Any )-> Any:
'''simple docstring'''
return self.delete_nth(0 )
def snake_case__ ( self : Union[str, Any] )-> Any: # delete from tail
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def snake_case__ ( self : Union[str, Any],lowercase_ : int = 0 )-> Any:
'''simple docstring'''
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
A__ = self.head # default first node
if index == 0:
A__ = self.head.next
else:
A__ = self.head
for _ in range(index - 1 ):
A__ = temp.next
A__ = temp.next
A__ = temp.next.next
return delete_node.data
def snake_case__ ( self : Optional[int] )-> bool:
'''simple docstring'''
return self.head is None
def snake_case__ ( self : Optional[Any] )-> None:
'''simple docstring'''
A__ = None
A__ = self.head
while current:
# Store the current node's next node.
A__ = current.next
# Make the current node's next point backwards
A__ = prev
# Make the previous node be the current node
A__ = current
# Make the current node the next node (to progress iteration)
A__ = next_node
# Return prev in order to put the head at the end
A__ = prev
def _snake_case( ) -> None:
'''simple docstring'''
A__ = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(SCREAMING_SNAKE_CASE__ ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE__ , i + 1 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(SCREAMING_SNAKE_CASE__ ) == 9
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
A__ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(-8 , 1 ) )
def _snake_case( ) -> None:
'''simple docstring'''
A__ = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
A__ = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
A__ = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
A__ = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
A__ = linked_list.delete_nth(10 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main() -> None:
'''simple docstring'''
from doctest import testmod
testmod()
    linked_list = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
    print(linked_list )
print('\nReading/changing Node data using indexing:' )
print(f'Element at Position 1: {linked_list[1]}' )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f'length of linked_list is : {len(linked_list )}' )
if __name__ == "__main__":
main()
| 718 |
from ..utils import DummyObject, requires_backends
class A ( metaclass=DummyObject ):
"""simple docstring"""
lowerCamelCase = ['transformers', 'torch', 'note_seq']
def __init__( self : Tuple,*lowercase_ : Any,**lowercase_ : Dict )-> Union[str, Any]:
'''simple docstring'''
requires_backends(self,['transformers', 'torch', 'note_seq'] )
@classmethod
def snake_case__ ( cls : List[str],*lowercase_ : int,**lowercase_ : Optional[int] )-> Any:
'''simple docstring'''
requires_backends(cls,['transformers', 'torch', 'note_seq'] )
@classmethod
def snake_case__ ( cls : Dict,*lowercase_ : Tuple,**lowercase_ : List[str] )-> Dict:
'''simple docstring'''
requires_backends(cls,['transformers', 'torch', 'note_seq'] )
| 586 | 0 |
'''simple docstring'''
from __future__ import annotations
def simple_interest( principal , daily_interest_rate , days_between_payments ):
"""simple docstring"""
if days_between_payments <= 0:
raise ValueError("""days_between_payments must be > 0""" )
if daily_interest_rate < 0:
raise ValueError("""daily_interest_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * daily_interest_rate * days_between_payments
def compound_interest( principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods , ):
"""simple docstring"""
if number_of_compounding_periods <= 0:
raise ValueError("""number_of_compounding_periods must be > 0""" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("""nominal_annual_interest_rate_percentage must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def apr_interest( principal , nominal_annual_percentage_rate , number_of_years , ):
"""simple docstring"""
if number_of_years <= 0:
raise ValueError("""number_of_years must be > 0""" )
if nominal_annual_percentage_rate < 0:
raise ValueError("""nominal_annual_percentage_rate must be >= 0""" )
if principal <= 0:
raise ValueError("""principal must be > 0""" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
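    # Hedged worked example (illustrative numbers, an assumption of this note):
    # 1000 at a 5% per-period rate over 2 periods accrues 1000 * 0.05 * 2 = 100
    # of simple interest.
    assert abs(simple_interest(1000, 0.05, 2) - 100.0) < 1e-9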
| 565 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( ProcessorMixin ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
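# Hedged usage sketch (an illustrative addition; the checkpoint name below is
# an assumption, not taken from this file):
#
# from transformers import CLIPSegProcessor
# from PIL import Image
#
# processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# inputs = processor(text=["a cat"], images=Image.open("cat.png"), return_tensors="pt")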
| 7 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None
UpperCamelCase = {
"7B": 11_008,
"13B": 13_824,
"30B": 17_920,
"65B": 22_016,
"70B": 28_672,
}
NUM_SHARDS = {
"7B": 1,
"7Bf": 1,
"13B": 2,
"13Bf": 2,
"30B": 4,
"65B": 8,
"70B": 8,
"70Bf": 8,
}
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=256 ) -> Union[str, Any]:
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> Any:
with open(SCREAMING_SNAKE_CASE , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
with open(SCREAMING_SNAKE_CASE , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ) -> int:
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
_lowercase : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , 'tmp' )
os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
_lowercase : Dict = read_json(os.path.join(SCREAMING_SNAKE_CASE , 'params.json' ) )
_lowercase : List[Any] = NUM_SHARDS[model_size]
_lowercase : Optional[int] = params['n_layers']
_lowercase : Optional[int] = params['n_heads']
_lowercase : List[Any] = n_heads // num_shards
_lowercase : Any = params['dim']
_lowercase : Union[str, Any] = dim // n_heads
_lowercase : Optional[int] = 1_0000.0
_lowercase : int = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
_lowercase : Optional[Any] = params['n_kv_heads'] # for GQA / MQA
_lowercase : Any = n_heads_per_shard // num_key_value_heads
_lowercase : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
_lowercase : Optional[int] = n_heads
_lowercase : Tuple = n_heads_per_shard
_lowercase : Tuple = dim
# permute for sliced rotary
def permute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=n_heads , SCREAMING_SNAKE_CASE=dim , SCREAMING_SNAKE_CASE=dim ):
        return w.view(SCREAMING_SNAKE_CASE , dim1 // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(F"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_lowercase : Optional[int] = torch.load(os.path.join(SCREAMING_SNAKE_CASE , 'consolidated.00.pth' ) , map_location='cpu' )
else:
# Sharded
_lowercase : Union[str, Any] = [
torch.load(os.path.join(SCREAMING_SNAKE_CASE , F"""consolidated.{i:02d}.pth""" ) , map_location='cpu' )
for i in range(SCREAMING_SNAKE_CASE )
]
_lowercase : int = 0
_lowercase : Optional[int] = {'weight_map': {}}
for layer_i in range(SCREAMING_SNAKE_CASE ):
_lowercase : Optional[int] = F"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_lowercase : List[str] = {
F"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wq.weight"""] ),
F"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[F"""layers.{layer_i}.attention.wk.weight"""] ),
F"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[F"""layers.{layer_i}.attention.wv.weight"""],
F"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[F"""layers.{layer_i}.attention.wo.weight"""],
F"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w1.weight"""],
F"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w2.weight"""],
F"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[F"""layers.{layer_i}.feed_forward.w3.weight"""],
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[F"""layers.{layer_i}.attention_norm.weight"""],
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[F"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_lowercase : Tuple = {
F"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.attention_norm.weight"""
].clone(),
F"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
F"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
_lowercase : List[Any] = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wq.weight"""].view(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_lowercase : Any = permute(
torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wk.weight"""].view(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
_lowercase : Dict = torch.cat(
[
loaded[i][F"""layers.{layer_i}.attention.wv.weight"""].view(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_lowercase : List[str] = torch.cat(
[loaded[i][F"""layers.{layer_i}.attention.wo.weight"""] for i in range(SCREAMING_SNAKE_CASE )] , dim=1 )
_lowercase : Any = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(SCREAMING_SNAKE_CASE )] , dim=0 )
_lowercase : str = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(SCREAMING_SNAKE_CASE )] , dim=1 )
_lowercase : List[str] = torch.cat(
[loaded[i][F"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(SCREAMING_SNAKE_CASE )] , dim=0 )
_lowercase : int = inv_freq
for k, v in state_dict.items():
_lowercase : Optional[int] = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
_lowercase : Optional[int] = F"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
_lowercase : Optional[Any] = {
'model.embed_tokens.weight': loaded['tok_embeddings.weight'],
'model.norm.weight': loaded['norm.weight'],
'lm_head.weight': loaded['output.weight'],
}
else:
_lowercase : Optional[Any] = {
'model.norm.weight': loaded[0]['norm.weight'],
'model.embed_tokens.weight': torch.cat(
[loaded[i]['tok_embeddings.weight'] for i in range(SCREAMING_SNAKE_CASE )] , dim=1 ),
'lm_head.weight': torch.cat([loaded[i]['output.weight'] for i in range(SCREAMING_SNAKE_CASE )] , dim=0 ),
}
for k, v in state_dict.items():
_lowercase : Any = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
# Write configs
_lowercase : List[Any] = {'total_size': param_count * 2}
write_json(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , 'pytorch_model.bin.index.json' ) )
_lowercase : Dict = params['ffn_dim_multiplier'] if 'ffn_dim_multiplier' in params else 1
_lowercase : Dict = params['multiple_of'] if 'multiple_of' in params else 256
_lowercase : int = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , num_attention_heads=params['n_heads'] , num_hidden_layers=params['n_layers'] , rms_norm_eps=params['norm_eps'] , num_key_value_heads=SCREAMING_SNAKE_CASE , )
config.save_pretrained(SCREAMING_SNAKE_CASE )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('Loading the checkpoint in a Llama model.' )
_lowercase : int = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('Saving in the Transformers format.' )
model.save_pretrained(SCREAMING_SNAKE_CASE , safe_serialization=SCREAMING_SNAKE_CASE )
shutil.rmtree(SCREAMING_SNAKE_CASE )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
# Initialize the tokenizer based on the `spm` model
_lowercase : Dict = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(F"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
_lowercase : List[str] = tokenizer_class(SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
def __magic_name__ ( ) -> Union[str, Any]:
_lowercase : int = argparse.ArgumentParser()
parser.add_argument(
'--input_dir' , help='Location of LLaMA weights, which contains tokenizer.model and model folders' , )
parser.add_argument(
'--model_size' , choices=['7B', '7Bf', '13B', '13Bf', '30B', '65B', '70B', '70Bf', 'tokenizer_only'] , )
parser.add_argument(
'--output_dir' , help='Location to write HF model and tokenizer' , )
parser.add_argument('--safe_serialization' , type=SCREAMING_SNAKE_CASE , help='Whether or not to save using `safetensors`.' )
_lowercase : List[str] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
_lowercase : Optional[int] = os.path.join(args.input_dir , 'tokenizer.model' )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
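# Hedged invocation sketch (an illustrative addition; the script name and the
# paths below are placeholders, not taken from this file):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/hf/llama-7b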
| 677 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
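# Hedged note (an illustrative addition): MAPPING above rewrites fairseq
# parameter names into their HF equivalents; the "*" is filled in with the
# layer index parsed out of the fairseq name, e.g. (for a non-finetuned
# checkpoint):
#
#   "encoder.layers.3.fc1.weight" -> "encoder.layers.3.feed_forward.intermediate_dense" + ".weight"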
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
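# Minimal sketch of the traversal above (illustrative only): a dotted key such as
# "encoder.layers.0.attention.k_proj" is resolved attribute by attribute, so the
# final pointer is the exact submodule whose weight/bias tensor gets overwritten:
#   pointer = hf_model
#   for attribute in "encoder.layers.0.attention.k_proj".split('.'):
#       pointer = getattr(pointer, attribute)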
def recursively_load_weights(fairseq_model, hf_model, is_finetuned) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "weight" in name:
                        weight_type = 'weight'
                    elif "bias" in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
logger.warning(F"""Unused weights: {unused_weights}""" )
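# Worked example of the "*" substitution above (illustrative): for the fairseq name
# "encoder.layers.3.self_attn.k_proj.weight" and key "self_attn.k_proj",
# name.split(key)[0] is "encoder.layers.3.", whose '.'-split gives layer_index "3",
# so "encoder.layers.*.attention.k_proj" becomes "encoder.layers.3.attention.k_proj".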
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
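# Name layout decoded above (illustrative): a fairseq key like "conv_layers.0.2.weight"
# yields layer_id=0 and type_id=2, i.e. the layer-norm weight of the first conv block,
# while type_id==0 addresses that block's conv kernel or bias directly.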
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True) -> None:
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
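# Example invocation (illustrative; paths are hypothetical):
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_large_ll60k.pt \
#       --pytorch_dump_folder_path ./hubert-hf --not_finetuned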
| 677 | 1 |
'''simple docstring'''
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        string = ''
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f'{head} -> {tail} == {weight}\n'
        return string.rstrip('\n')

    def get_edges(self):
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
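# Example usage (illustrative; `boruvka_mst` is a reconstructed name for the static MST builder):
#   g = Graph.build(vertices=[1, 2, 3, 4], edges=[(1, 2, 1), (2, 3, 2), (3, 4, 3), (1, 4, 4)])
#   g.distinct_weight()  # Boruvka's algorithm assumes all edge weights are distinct
#   print(Graph.boruvka_mst(g))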
| 120 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"
    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
                ' removed, as the behaviour will default to that of reshape_last_stage = True.', FutureWarning, )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get('reshape_last_stage', True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
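# Example (illustrative): a default config mirrors the smallest SegFormer variant —
#   config = SegformerConfig()
#   config.hidden_sizes  # [32, 64, 160, 256], one channel width per encoder stage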
| 120 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
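# Worked example (illustrative): with 2 classes, logits [0., 0.] and label 0,
# log_softmax gives [-0.693, -0.693]; the one-hot mask keeps -(-0.693) = 0.693,
# i.e. exactly -log(0.5) for a uniform prediction.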
@dataclass
class Args:
    model_id: str = 'google/bigbird-roberta-base'
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = 'bigbird-roberta-natural-questions'
    base_dir: str = 'training-expt'
    tr_data_path: str = 'data/nq-training.jsonl'
    val_data_path: str = 'data/nq-validation.jsonl'

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features['input_ids'])
        batch = {
            'input_ids': jnp.array(input_ids, dtype=jnp.int32),
            'attention_mask': jnp.array(attention_mask, dtype=jnp.int32),
            'start_labels': jnp.array(features['start_token'], dtype=jnp.int32),
            'end_labels': jnp.array(features['end_token'], dtype=jnp.int32),
            'pooled_labels': jnp.array(features['category'], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
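# Example (illustrative): iterating a 10-row dataset with batch_size=4 yields two
# batches of 4 and silently drops the trailing 2 rows, per len(dataset) // batch_size.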
@partial(jax.pmap , axis_name="batch" )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop('start_labels')
        end_labels = model_inputs.pop('end_labels')
        pooled_labels = model_inputs.pop('pooled_labels')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    grads = jax.lax.pmean(grads, 'batch')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name="batch" )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop('start_labels')
    end_labels = model_inputs.pop('end_labels')
    pooled_labels = model_inputs.pop('pooled_labels')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
        self.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['loss'])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='Evaluating ... '):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['loss'])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=' ... ')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, 'opt_state.msgpack'), 'wb') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, 'args.joblib'))
        joblib.dump(self.data_collator, os.path.join(save_dir, 'data_collator.joblib'))
        with open(os.path.join(save_dir, 'training_state.json'), 'w') as f:
            json.dump({'step': state.step.item()}, f)
        print('DONE')
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))
    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']
    print('DONE')
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
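# Shape of the resulting schedule (illustrative numbers): with init_lr=0.0, lr=3e-5,
# warmup_steps=100 and num_train_steps=1000, the learning rate climbs linearly to 3e-5
# over the first 100 steps, then decays linearly toward 1e-7 over the remaining 900.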
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Mask out biases and LayerNorm scales from weight decay, keyed on parameter paths.
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 708 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_clipseg'] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"
    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
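# Example (illustrative): the defaults describe 8-frame clips of 224x224 images,
#   config = TimesformerConfig()
#   (config.num_frames, config.image_size)  # (8, 224)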
| 572 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'The solutions are: {solution_1} and {solution_2}')


if __name__ == "__main__":
    main()
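# Worked check (illustrative): for a=5, b=6, c=1 the discriminant is 36 - 20 = 16,
# so the roots are (-6 +/- 4) / 10, i.e. -0.2 and -1.0 — both real, hence returned as floats.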
| 572 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained('microsoft/mpnet-base')
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained('microsoft/mpnet-base')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
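# Minimal reproduction of the check above (illustrative; downloads pretrained weights):
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   out = model(torch.tensor([[0, 345, 232]]))[0]
#   print(out.shape)  # torch.Size([1, 3, 768])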
| 33 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
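# Worked check (illustrative): for array=[1, 2, 5] and target=5 the bottom-up table is
# dp = [1, 1, 2, 3, 5, 9], so 9 ordered sequences sum to 5 (e.g. 1+1+1+1+1, 1+2+2, 5, ...).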
| 33 | 1 |
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    # Convert the phase angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)
    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)
    # Calculate apparent power
    return voltage_rect * current_rect
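# Worked check (illustrative): voltage=100 V at 0 deg and current=5 A at -30 deg give
# S = 500 * exp(-j*pi/6) ~= 433.0 - 250.0j volt-amperes (P ~= 433 W, Q ~= -250 var).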
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
SPIECE_UNDERLINE = '▁'
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
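# Worked example (illustrative): for a single 3-token sequence the model-ready ids are
# [CLS] t1 t2 t3 [SEP], so get_special_tokens_mask returns [1, 0, 0, 0, 1].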
| 551 | 1 |
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
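# Shape walk-through (illustrative): with batch=2, clip_extra_context_tokens=4 and
# cross_attention_dim=768, the projection yields (2, 4*768), reshaped to (2, 768, 4)
# and permuted to (2, 4, 768), so 4 extra tokens are prepended to the text encoder sequence.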
| 529 |
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads.")
    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}")
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
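# Example invocation (illustrative; the script name and paths are hypothetical):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_model/ckpt --bert_config_file ./config.json \
#       --pytorch_dump_path ./pytorch_model.bin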
| 529 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.")
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}")
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 275 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : List[Any] = logging.get_logger(__name__)
__A : List[Any] = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'roberta-prelayernorm'
def __init__( self : Tuple , lowerCamelCase : Tuple=5_02_65 , lowerCamelCase : Optional[int]=7_68 , lowerCamelCase : Optional[int]=12 , lowerCamelCase : Optional[int]=12 , lowerCamelCase : int=30_72 , lowerCamelCase : Optional[int]="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Optional[int]=5_12 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : int=0.02 , lowerCamelCase : Any=1E-12 , lowerCamelCase : int=1 , lowerCamelCase : List[Any]=0 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Optional[Any]="absolute" , lowerCamelCase : List[Any]=True , lowerCamelCase : Tuple=None , **lowerCamelCase : int , ) -> Optional[Any]:
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
lowerCAmelCase_ : Dict = vocab_size
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Dict = num_hidden_layers
lowerCAmelCase_ : List[Any] = num_attention_heads
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : Optional[Any] = intermediate_size
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Tuple = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : List[Any] = type_vocab_size
lowerCAmelCase_ : List[Any] = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : List[str] = position_embedding_type
lowerCAmelCase_ : Tuple = use_cache
lowerCAmelCase_ : Union[str, Any] = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
@property
def __lowercase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 275 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=__UpperCAmelCase )
class __UpperCAmelCase ( __UpperCAmelCase ):
'''simple docstring'''
lowercase : str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True} )
lowercase : ClassVar[Features] = Features({"image": Image()} )
lowercase : ClassVar[Features] = Features({"labels": ClassLabel} )
lowercase : str = "image"
lowercase : str = "labels"
def UpperCamelCase_ ( self , _A ):
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , UpperCAmelCase_ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
_SCREAMING_SNAKE_CASE =copy.deepcopy(self )
_SCREAMING_SNAKE_CASE =self.label_schema.copy()
_SCREAMING_SNAKE_CASE =features[self.label_column]
_SCREAMING_SNAKE_CASE =label_schema
return task_template
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
return {
self.image_column: "image",
self.label_column: "labels",
}
| 711 |
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
UpperCAmelCase_ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ : List[Any] = 2_5_6
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Any = ["melgan"]
def __init__( self , _A , _A , _A , _A , _A , ):
'''simple docstring'''
super().__init__()
# From MELGAN
_SCREAMING_SNAKE_CASE =math.log(1E-5 ) # Matches MelGAN training.
_SCREAMING_SNAKE_CASE =4.0 # Largest value for most examples
_SCREAMING_SNAKE_CASE =1_2_8
self.register_modules(
notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =output_range
if clip:
_SCREAMING_SNAKE_CASE =torch.clip(_A , self.min_value , self.max_value )
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCamelCase_ ( self , _A , _A=(-1.0, 1.0) , _A=False ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =input_range
_SCREAMING_SNAKE_CASE =torch.clip(_A , _A , _A ) if clip else outputs
# Scale to [0, 1].
_SCREAMING_SNAKE_CASE =(outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =input_tokens > 0
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.notes_encoder(
encoder_input_tokens=_A , encoder_inputs_mask=_A )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.continuous_encoder(
encoder_inputs=_A , encoder_inputs_mask=_A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCamelCase_ ( self , _A , _A , _A ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =noise_time
if not torch.is_tensor(_A ):
_SCREAMING_SNAKE_CASE =torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
_SCREAMING_SNAKE_CASE =timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_SCREAMING_SNAKE_CASE =timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_SCREAMING_SNAKE_CASE =self.decoder(
encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A )
return logits
@torch.no_grad()
def __call__( self , _A , _A = None , _A = 1_0_0 , _A = True , _A = "numpy" , _A = None , _A = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(_A )}.""" )
_SCREAMING_SNAKE_CASE =np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_SCREAMING_SNAKE_CASE =np.zeros([1, 0, self.n_dims] , np.floataa )
_SCREAMING_SNAKE_CASE =torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
for i, encoder_input_tokens in enumerate(_A ):
if i == 0:
_SCREAMING_SNAKE_CASE =torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_SCREAMING_SNAKE_CASE =torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_SCREAMING_SNAKE_CASE =ones
_SCREAMING_SNAKE_CASE =self.scale_features(
_A , output_range=[-1.0, 1.0] , clip=_A )
_SCREAMING_SNAKE_CASE =self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_SCREAMING_SNAKE_CASE =randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_SCREAMING_SNAKE_CASE =self.decode(
encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_SCREAMING_SNAKE_CASE =self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample
_SCREAMING_SNAKE_CASE =self.scale_to_features(_A , input_range=[-1.0, 1.0] )
_SCREAMING_SNAKE_CASE =mel[:1]
_SCREAMING_SNAKE_CASE =mel.cpu().float().numpy()
_SCREAMING_SNAKE_CASE =np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A )
logger.info('''Generated segment''' , _A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
_SCREAMING_SNAKE_CASE =self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_SCREAMING_SNAKE_CASE =full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_A )
| 165 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[Any] = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
_lowerCAmelCase : Any = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
_lowerCAmelCase : Optional[Any] = '</w>'
_lowerCAmelCase : str = '@@ '
def a_ ( UpperCamelCase_ : Dict ) -> int:
"""simple docstring"""
lowerCamelCase = set()
lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCamelCase = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase : Tuple = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class lowerCAmelCase ( UpperCamelCase_ ):
'''simple docstring'''
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['input_ids', 'attention_mask']
def __init__( self : List[Any] , __snake_case : Optional[Any] , __snake_case : int="<s>" , __snake_case : Dict="<pad>" , __snake_case : List[str]="</s>" , __snake_case : str="<unk>" , __snake_case : Optional[int]=False , __snake_case : List[Any]=None , **__snake_case : Any , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , pad_token=__snake_case , do_lower_case=__snake_case , **__snake_case , )
lowerCamelCase = do_lower_case
with open(__snake_case , encoding='utf-8' ) as vocab_handle:
lowerCamelCase = json.load(__snake_case )
lowerCamelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
lowerCamelCase = None
lowerCamelCase = None
else:
with open(__snake_case , encoding='utf-8' ) as merges_handle:
lowerCamelCase = merges_handle.read().split('\n' )[:-1]
lowerCamelCase = [tuple(merge.split()[:2] ) for merge in merges]
lowerCamelCase = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
lowerCamelCase = {}
@property
def lowerCamelCase__ ( self : int ) -> int:
'''simple docstring'''
return len(self.decoder )
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase__ ( self : str , __snake_case : Dict ) -> Tuple:
'''simple docstring'''
lowerCamelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCamelCase = get_pairs(__snake_case )
if not pairs:
return token
while True:
lowerCamelCase = min(__snake_case , key=lambda __snake_case : self.bpe_ranks.get(__snake_case , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCamelCase , lowerCamelCase = bigram
lowerCamelCase = []
lowerCamelCase = 0
while i < len(__snake_case ):
try:
lowerCamelCase = word.index(__snake_case , __snake_case )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCamelCase = j
if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCamelCase = tuple(__snake_case )
lowerCamelCase = new_word
if len(__snake_case ) == 1:
break
else:
lowerCamelCase = get_pairs(__snake_case )
lowerCamelCase = ' '.join(__snake_case )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCamelCase = '\n' + BPE_TOKEN_MERGES
if word.endswith(__snake_case ):
lowerCamelCase = word.replace(__snake_case , '' )
lowerCamelCase = word.replace(' ' , __snake_case )
lowerCamelCase = word
return word
def lowerCamelCase__ ( self : List[Any] , __snake_case : List[Any] ) -> Tuple:
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
lowerCamelCase = text.lower()
lowerCamelCase = text.split()
lowerCamelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__snake_case ).split(' ' ) ) )
return split_tokens
def lowerCamelCase__ ( self : Any , __snake_case : str ) -> int:
'''simple docstring'''
return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) )
def lowerCamelCase__ ( self : Any , __snake_case : int ) -> str:
'''simple docstring'''
lowerCamelCase = self.decoder.get(__snake_case , self.unk_token )
return result
def lowerCamelCase__ ( self : List[str] , __snake_case : List[str] ) -> str:
'''simple docstring'''
lowerCamelCase = ' '.join(__snake_case )
# make sure @@ tokens are concatenated
lowerCamelCase = ''.join(string.split(__snake_case ) )
return string
def lowerCamelCase__ ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase = os.path.join(
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(__snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '\n' )
lowerCamelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__snake_case , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __snake_case : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
lowerCamelCase = token_index
writer.write(' '.join(__snake_case ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 246 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowercase : int = 'examples/'
lowercase : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowercase : Union[str, Any] = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
lowercase : Union[str, Any] = 'README.md'
def __a ( A__ , A__ , A__ ) -> Dict:
with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase , lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace("VERSION" , A__ )
lowerCAmelCase = re_pattern.sub(A__ , A__ )
with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(A__ )
def __a ( A__ ) -> List[Any]:
for folder, directories, fnames in os.walk(A__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(A__ , A__ ) , A__ , pattern="examples" )
def __a ( A__ , A__=False ) -> Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A__ , A__ , A__ )
if not patch:
update_version_in_examples(A__ )
def __a ( ) -> List[str]:
lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
lowerCAmelCase = "1. Want to contribute a new model?"
with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase = f.readlines()
# Find the start of the list.
lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(A__ )
def __a ( ) -> Optional[Any]:
with open(REPLACE_FILES["init"] , "r" ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(A__ ).groups()[0]
return packaging.version.parse(A__ )
def __a ( A__=False ) -> Optional[int]:
lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
lowerCAmelCase = default_version.base_version
elif patch:
lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
if len(A__ ) == 0:
lowerCAmelCase = default_version
print(f"Updating version to {version}." )
global_version_update(A__ , patch=A__ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def __a ( ) -> Tuple:
lowerCAmelCase = get_version()
lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" )
if len(A__ ) == 0:
lowerCAmelCase = dev_version
print(f"Updating version to {version}." )
global_version_update(A__ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowercase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 649 | 0 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def lowerCAmelCase_ ( lowercase: Any ) -> Any:
'''simple docstring'''
return x + 2
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = """x = 3"""
_UpperCamelCase: str = {}
_UpperCamelCase: Optional[Any] = evaluate(a_ , {} , state=a_ )
assert result == 3
self.assertDictEqual(a_ , {'''x''': 3} )
_UpperCamelCase: Union[str, Any] = """x = y"""
_UpperCamelCase: Optional[int] = {"""y""": 5}
_UpperCamelCase: str = evaluate(a_ , {} , state=a_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a_ , {'''x''': 5, '''y''': 5} )
def lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
_UpperCamelCase: Any = """y = add_two(x)"""
_UpperCamelCase: Optional[Any] = {"""x""": 3}
_UpperCamelCase: Union[str, Any] = evaluate(a_ , {'''add_two''': add_two} , state=a_ )
assert result == 5
self.assertDictEqual(a_ , {'''x''': 3, '''y''': 5} )
# Won't work without the tool
with CaptureStdout() as out:
_UpperCamelCase: List[Any] = evaluate(a_ , {} , state=a_ )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase ( self : Optional[int] ):
"""simple docstring"""
_UpperCamelCase: Tuple = """x = 3"""
_UpperCamelCase: Dict = {}
_UpperCamelCase: str = evaluate(a_ , {} , state=a_ )
assert result == 3
self.assertDictEqual(a_ , {'''x''': 3} )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase: Any = """test_dict = {'x': x, 'y': add_two(x)}"""
_UpperCamelCase: Union[str, Any] = {"""x""": 3}
_UpperCamelCase: Optional[Any] = evaluate(a_ , {'''add_two''': add_two} , state=a_ )
self.assertDictEqual(a_ , {'''x''': 3, '''y''': 5} )
self.assertDictEqual(a_ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: List[str] = """x = 3\ny = 5"""
_UpperCamelCase: Dict = {}
_UpperCamelCase: str = evaluate(a_ , {} , state=a_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a_ , {'''x''': 3, '''y''': 5} )
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: int = """text = f'This is x: {x}.'"""
_UpperCamelCase: int = {"""x""": 3}
_UpperCamelCase: Union[str, Any] = evaluate(a_ , {} , state=a_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(a_ , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase: Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5"""
_UpperCamelCase: Dict = {"""x""": 3}
_UpperCamelCase: int = evaluate(a_ , {} , state=a_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(a_ , {'''x''': 3, '''y''': 2} )
_UpperCamelCase: Optional[Any] = {"""x""": 8}
_UpperCamelCase: Dict = evaluate(a_ , {} , state=a_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a_ , {'''x''': 8, '''y''': 5} )
def lowerCAmelCase ( self : str ):
"""simple docstring"""
_UpperCamelCase: List[Any] = """test_list = [x, add_two(x)]"""
_UpperCamelCase: Dict = {"""x""": 3}
_UpperCamelCase: str = evaluate(a_ , {'''add_two''': add_two} , state=a_ )
self.assertListEqual(a_ , [3, 5] )
self.assertDictEqual(a_ , {'''x''': 3, '''test_list''': [3, 5]} )
def lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
_UpperCamelCase: Optional[Any] = """y = x"""
_UpperCamelCase: List[Any] = {"""x""": 3}
_UpperCamelCase: List[Any] = evaluate(a_ , {} , state=a_ )
assert result == 3
self.assertDictEqual(a_ , {'''x''': 3, '''y''': 3} )
def lowerCAmelCase ( self : Any ):
"""simple docstring"""
_UpperCamelCase: int = """test_list = [x, add_two(x)]\ntest_list[1]"""
_UpperCamelCase: Any = {"""x""": 3}
_UpperCamelCase: Optional[Any] = evaluate(a_ , {'''add_two''': add_two} , state=a_ )
assert result == 5
self.assertDictEqual(a_ , {'''x''': 3, '''test_list''': [3, 5]} )
_UpperCamelCase: Any = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
_UpperCamelCase: List[str] = {"""x""": 3}
_UpperCamelCase: Union[str, Any] = evaluate(a_ , {'''add_two''': add_two} , state=a_ )
assert result == 5
self.assertDictEqual(a_ , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = """x = 0\nfor i in range(3):\n x = i"""
_UpperCamelCase: Union[str, Any] = {}
_UpperCamelCase: List[Any] = evaluate(a_ , {'''range''': range} , state=a_ )
assert result == 2
self.assertDictEqual(a_ , {'''x''': 2, '''i''': 2} ) | 706 | import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'''split_dict''' , [
SplitDict(),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 , dataset_name='''my_dataset''' )} ),
SplitDict({'''train''': SplitInfo(name='''train''' , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({'''train''': SplitInfo()} ),
] , )
def lowerCAmelCase_ ( lowercase: SplitDict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase: List[str] = split_dict._to_yaml_list()
assert len(lowercase ) == len(lowercase )
_UpperCamelCase: Tuple = SplitDict._from_yaml_list(lowercase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_UpperCamelCase: Optional[Any] = None
# the split name of split_dict takes over the name of the split info object
_UpperCamelCase: Any = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
'''split_info''' , [SplitInfo(), SplitInfo(dataset_name=lowercase ), SplitInfo(dataset_name='''my_dataset''' )] )
def lowerCAmelCase_ ( lowercase: str ) -> Tuple:
'''simple docstring'''
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
_UpperCamelCase: Optional[Any] = asdict(SplitDict({'''train''': split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name | 264 | 0 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCamelCase : str = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase : Optional[Any] = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCamelCase : Tuple = spec.loader.load_module()
UpperCamelCase : str = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase : List[str] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
UpperCamelCase : int = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def __snake_case ( ) -> Union[str, Any]:
"""simple docstring"""
A = []
for config_class in list(CONFIG_MAPPING.values() ):
A = False
# source code of `config_class`
A = inspect.getsource(UpperCamelCase__ )
A = _re_checkpoint.findall(UpperCamelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
A , A = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
A = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
A = True
break
A = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0:
A = '\n'.join(sorted(UpperCamelCase__ ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 690 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( UpperCAmelCase_ , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
def __a ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any]=0 ):
if str(_lowercase ).startswith('mps' ):
A = torch.manual_seed(_lowercase )
else:
A = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : List[Any] , _lowercase : int=torch.floataa , _lowercase : int=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple=torch.floataa , _lowercase : Optional[Any]=0 ):
A = torch.manual_seed(_lowercase )
A = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
A = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 690 | 1 |
'''simple docstring'''
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCAmelCase ( lowercase : bool = True , *lowercase : Any , **lowercase : List[Any] ) ->Optional[Any]:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
lowercase__ = False
if main_process_only:
lowercase__ = PartialState().local_process_index == 0
return _tqdm(*__a , **__a , disable=__a )
| 720 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 318 | 0 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : str = 'owlvit_text_model'
def __init__(self , A=49_408 , A=512 , A=2_048 , A=12 , A=8 , A=16 , A="quick_gelu" , A=1E-5 , A=0.0 , A=0.02 , A=1.0 , A=0 , A=49_406 , A=49_407 , **A , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
_a = vocab_size
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = max_position_embeddings
_a = hidden_act
_a = layer_norm_eps
_a = attention_dropout
_a = initializer_range
_a = initializer_factor
@classmethod
def a__ (cls , A , **A ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A )
_a , _a = cls.get_config_dict(A , **A )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_a = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Dict = 'owlvit_vision_model'
def __init__(self , A=768 , A=3_072 , A=12 , A=12 , A=3 , A=768 , A=32 , A="quick_gelu" , A=1E-5 , A=0.0 , A=0.02 , A=1.0 , **A , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**A )
_a = hidden_size
_a = intermediate_size
_a = num_hidden_layers
_a = num_attention_heads
_a = num_channels
_a = image_size
_a = patch_size
_a = hidden_act
_a = layer_norm_eps
_a = attention_dropout
_a = initializer_range
_a = initializer_factor
@classmethod
def a__ (cls , A , **A ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A )
_a , _a = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = 'owlvit'
__lowerCamelCase : List[str] = True
def __init__(self , A=None , A=None , A=512 , A=2.6592 , A=True , **A , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**A )
if text_config is None:
_a = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
_a = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
_a = OwlViTTextConfig(**A )
_a = OwlViTVisionConfig(**A )
_a = projection_dim
_a = logit_scale_init_value
_a = return_dict
_a = 1.0
@classmethod
def a__ (cls , A , **A ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(A )
_a , _a = cls.get_config_dict(A , **A )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(A , **A )
@classmethod
def a__ (cls , A , A , **A ) -> Any:
"""simple docstring"""
_a = {}
_a = text_config
_a = vision_config
return cls.from_dict(A , **A )
def a__ (self ) -> Tuple:
"""simple docstring"""
_a = copy.deepcopy(self.__dict__ )
_a = self.text_config.to_dict()
_a = self.vision_config.to_dict()
_a = self.__class__.model_type
return output
class __A ( A ):
'''simple docstring'''
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def a__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def a__ (self ) -> float:
"""simple docstring"""
return 1E-4
def a__ (self , A , A = -1 , A = -1 , A = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_a = super().generate_dummy_inputs(
processor.tokenizer , batch_size=A , seq_length=A , framework=A )
_a = super().generate_dummy_inputs(
processor.image_processor , batch_size=A , framework=A )
return {**text_input_dict, **image_input_dict}
@property
def a__ (self ) -> int:
"""simple docstring"""
return 14
| 11 | from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __UpperCamelCase ( A ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__magic_name__ ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _A ( __UpperCamelCase ):
@staticmethod
def _a (SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=SCREAMING_SNAKE_CASE_ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
UpperCamelCase__ = model_type
UpperCamelCase__ = tf_checkpoint
UpperCamelCase__ = pytorch_dump_output
UpperCamelCase__ = config
UpperCamelCase__ = finetuning_task_name
def _a (self ) -> Tuple:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
else:
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE_ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 415 | 0 |
UpperCAmelCase = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 719 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
UpperCAmelCase = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 351 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.